Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'google:datastore' # for google app engine datastore 
  121  'google:sql' # for google app engine with sql (mysql compatible) 
  122  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  123  'imap://user:password@server:port' # experimental 
  124   
  125  For more info: 
  126  help(DAL) 
  127  help(Field) 
  128  """ 
  129   
  130  ################################################################################### 
  131  # this file only exposes DAL and Field 
  132  ################################################################################### 
  133   
# Public API of this module: everything else is an implementation detail.
__all__ = ['DAL', 'Field']

# Default maximum length for character columns when the backend does not
# impose its own limit.
MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length
# Default lengths (in characters/bytes) per field type when the user does
# not pass Field(..., length=...).
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# Number of entries kept in the query-timings ring buffer.
TIMINGSSIZE = 100
# Per-platform shared-library name for the SpatiaLite extension.
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# URI used when DAL() is created without an explicit connection string.
DEFAULT_URI = 'sqlite://dummy.db'
  149   
  150  import re 
  151  import sys 
  152  import locale 
  153  import os 
  154  import types 
  155  import datetime 
  156  import threading 
  157  import time 
  158  import csv 
  159  import cgi 
  160  import copy 
  161  import socket 
  162  import logging 
  163  import base64 
  164  import shutil 
  165  import marshal 
  166  import decimal 
  167  import struct 
  168  import urllib 
  169  import hashlib 
  170  import uuid 
  171  import glob 
  172  import traceback 
  173  import platform 
  174   
# Python 2 / Python 3 compatibility shims: pick the right pickle/StringIO/
# copyreg implementations and normalize the bytes/unicode/long names so the
# rest of the module can use them uniformly.
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
    import cPickle as pickle
    import cStringIO as StringIO
    import copy_reg as copyreg
    hashlib_md5 = hashlib.md5
    bytes, unicode = str, unicode
else:
    import pickle
    from io import StringIO as StringIO
    import copyreg
    # Python 3 has no separate long type
    long = int
    # md5 requires bytes on Python 3; encode text as utf8 first
    hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
    bytes, unicode = bytes, str
  189   
# Types accepted wherever a callable default/compute value is allowed.
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

# Keyword arguments recognized by define_table(); anything else is rejected.
TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name',
     'common_filter','polymodel','table_class','on_define',))

# Keyword arguments recognized by select().
SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable'))

# Short aliases for frequently used builtins/os helpers (minor speed win,
# and they bypass any __getattr__/__setattr__ overrides on DAL objects).
ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
  207   
  208  ################################################################################### 
  209  # following checks allow the use of dal without web2py, as a standalone module 
  210  ################################################################################### 
  211  try: 
  212      from utils import web2py_uuid 
  213  except (ImportError, SystemError): 
  214      import uuid 
215 - def web2py_uuid(): return str(uuid.uuid4())
# optional module: file locking, used by BaseAdapter.file_open below
try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False

# optional module: web2py serializers; otherwise fall back to stdlib json,
# then to the bundled simplejson, then give up (simplejson = None)
try:
    import serializers
    have_serializers = True
except ImportError:
    have_serializers = False
    try:
        import json as simplejson
    except ImportError:
        try:
            import gluon.contrib.simplejson as simplejson
        except ImportError:
            simplejson = None

# optional module: web2py validators (absent when the DAL is standalone)
try:
    import validators
    have_validators = True
except (ImportError, SyntaxError):
    have_validators = False

LOGGER = logging.getLogger("web2py.dal")
DEFAULT = lambda:0  # sentinel default, distinct from None

GLOBAL_LOCKER = threading.RLock()  # guards ConnectionPool.POOLS
THREAD_LOCAL = threading.local()   # per-thread DAL state (folder, db instances)

# internal representation of tables with field
#  <table>.<field>, tables and fields may only be [a-zA-Z0-9_]

REGEX_TYPE = re.compile('^([\w\_\:]+)')
REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
REGEX_W = re.compile('^\w+$')
REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$')
REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
REGEX_QUOTES = re.compile("'[^']*'")
REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
REGEX_NOPASSWD = re.compile('(?<=\:)([^:@/]+)(?=@.+)')

# list of drivers will be built on the fly
# and lists only what is available
DRIVERS = []

try:
    from new import classobj
    from google.appengine.ext import db as gae
    from google.appengine.api import namespace_manager, rdbms
    from google.appengine.api.datastore_types import Key  ### for belongs on ID
    from google.appengine.ext.db.polymodel import PolyModel
    DRIVERS.append('google')
except ImportError:
    pass

# when running on Google App Engine no other SQL driver is usable
if not 'google' in DRIVERS:

    try:
        from pysqlite2 import dbapi2 as sqlite2
        DRIVERS.append('SQLite(sqlite2)')
    except ImportError:
        LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')

    try:
        from sqlite3 import dbapi2 as sqlite3
        DRIVERS.append('SQLite(sqlite3)')
    except ImportError:
        LOGGER.debug('no SQLite drivers sqlite3')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import contrib.pymysql as pymysql
            # monkeypatch pymysql because they havent fixed the bug:
            # https://github.com/petehunt/PyMySQL/issues/86
            pymysql.ESCAPE_REGEX = re.compile("'")
            pymysql.ESCAPE_MAP = {"'": "''"}
            # end monkeypatch
        except ImportError:
            import pymysql
        DRIVERS.append('MySQL(pymysql)')
    except ImportError:
        LOGGER.debug('no MySQL driver pymysql')

    try:
        import MySQLdb
        DRIVERS.append('MySQL(MySQLdb)')
    except ImportError:
        LOGGER.debug('no MySQL driver MySQLDB')

    try:
        import psycopg2
        from psycopg2.extensions import adapt as psycopg2_adapt
        DRIVERS.append('PostgreSQL(psycopg2)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver psycopg2')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import contrib.pg8000.dbapi as pg8000
        except ImportError:
            import pg8000.dbapi as pg8000
        DRIVERS.append('PostgreSQL(pg8000)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver pg8000')

    try:
        import cx_Oracle
        DRIVERS.append('Oracle(cx_Oracle)')
    except ImportError:
        LOGGER.debug('no Oracle driver cx_Oracle')

    try:
        try:
            import pyodbc
        except ImportError:
            try:
                import contrib.pypyodbc as pyodbc
            except Exception, e:
                raise ImportError(str(e))
        DRIVERS.append('MSSQL(pyodbc)')
        DRIVERS.append('DB2(pyodbc)')
        DRIVERS.append('Teradata(pyodbc)')
    except ImportError:
        LOGGER.debug('no MSSQL/DB2/Teradata driver pyodbc')

    try:
        import Sybase
        DRIVERS.append('Sybase(Sybase)')
    except ImportError:
        LOGGER.debug('no Sybase driver')

    try:
        import kinterbasdb
        DRIVERS.append('Interbase(kinterbasdb)')
        DRIVERS.append('Firebird(kinterbasdb)')
    except ImportError:
        LOGGER.debug('no Firebird/Interbase driver kinterbasdb')

    try:
        import fdb
        # NOTE(review): 'Firbird' is a typo in the driver label, but it is a
        # runtime value that adapters may match against — left unchanged here.
        DRIVERS.append('Firbird(fdb)')
    except ImportError:
        LOGGER.debug('no Firebird driver fdb')
    #####
    try:
        import firebirdsql
        DRIVERS.append('Firebird(firebirdsql)')
    except ImportError:
        LOGGER.debug('no Firebird driver firebirdsql')

    try:
        import informixdb
        DRIVERS.append('Informix(informixdb)')
        LOGGER.warning('Informix support is experimental')
    except ImportError:
        LOGGER.debug('no Informix driver informixdb')

    try:
        import sapdb
        DRIVERS.append('SQL(sapdb)')
        LOGGER.warning('SAPDB support is experimental')
    except ImportError:
        LOGGER.debug('no SAP driver sapdb')

    try:
        import cubriddb
        DRIVERS.append('Cubrid(cubriddb)')
        LOGGER.warning('Cubrid support is experimental')
    except ImportError:
        LOGGER.debug('no Cubrid driver cubriddb')

    try:
        from com.ziclix.python.sql import zxJDBC
        import java.sql
        # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
        from org.sqlite import JDBC # required by java.sql; ensure we have it
        zxJDBC_sqlite = java.sql.DriverManager
        DRIVERS.append('PostgreSQL(zxJDBC)')
        DRIVERS.append('SQLite(zxJDBC)')
        LOGGER.warning('zxJDBC support is experimental')
        is_jdbc = True
    except ImportError:
        LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
        is_jdbc = False

    try:
        import ingresdbi
        DRIVERS.append('Ingres(ingresdbi)')
    except ImportError:
        LOGGER.debug('no Ingres driver ingresdbi')
    # NOTE could try JDBC.......

try:
    import couchdb
    DRIVERS.append('CouchDB(couchdb)')
except ImportError:
    LOGGER.debug('no Couchdb driver couchdb')

# NOTE(review): the two bare `except:` clauses below swallow everything,
# not just ImportError — kept as-is in this documentation pass.
try:
    import pymongo
    DRIVERS.append('MongoDB(pymongo)')
except:
    LOGGER.debug('no MongoDB driver pymongo')

try:
    import imaplib
    DRIVERS.append('IMAP(imaplib)')
except:
    LOGGER.debug('no IMAP driver imaplib')

# (match-test regex, substitution regex, replacement) triples used by
# pluralize(); the first matching rule wins and the final '$' rule is a
# catch-all that simply appends 's'.
PLURALIZE_RULES = [
    (re.compile('child$'), re.compile('child$'), 'children'),
    (re.compile('oot$'), re.compile('oot$'), 'eet'),
    (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
    (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
    (re.compile('sis$'), re.compile('sis$'), 'ses'),
    (re.compile('man$'), re.compile('man$'), 'men'),
    (re.compile('ife$'), re.compile('ife$'), 'ives'),
    (re.compile('eau$'), re.compile('eau$'), 'eaux'),
    (re.compile('lf$'), re.compile('lf$'), 'lves'),
    (re.compile('[sxz]$'), re.compile('$'), 'es'),
    (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
    (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
    (re.compile('$'), re.compile('$'), 's'),
    ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural of *singular* using the first matching rule.

    Each rule is a (search_regex, sub_regex, replacement) triple; the
    catch-all last rule in PLURALIZE_RULES normally guarantees a result.
    """
    for re_search, re_sub, replace in rules:
        if re_search.search(singular):
            candidate = re_sub.sub(replace, singular)
            if candidate:
                return candidate
462
def hide_password(uri):
    """Return *uri* with its password component replaced by '******'."""
    masked = REGEX_NOPASSWD.sub('******', uri)
    return masked
465
def OR(a,b):
    """Combine two operands with the `|` operator (query OR)."""
    combined = a | b
    return combined
468
def AND(a,b):
    """Combine two operands with the `&` operator (query AND)."""
    combined = a & b
    return combined
471
def IDENTITY(x):
    """Return the argument unchanged (used e.g. as the default
    credential_decoder of BaseAdapter)."""
    return x
473
def varquote_aux(name, quotestr='%s'):
    """Wrap *name* in *quotestr* unless it is a plain \\w+ identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
# GAE-specific property classes below are only defined when the 'google'
# driver imports succeeded above.
if 'google' in DRIVERS:

    # JDBC is never available on Google App Engine
    is_jdbc = False
480 481 - class GAEDecimalProperty(gae.Property):
482 """ 483 GAE decimal implementation 484 """ 485 data_type = decimal.Decimal 486
487 - def __init__(self, precision, scale, **kwargs):
488 super(GAEDecimalProperty, self).__init__(self, **kwargs) 489 d = '1.' 490 for x in range(scale): 491 d += '0' 492 self.round = decimal.Decimal(d)
493
494 - def get_value_for_datastore(self, model_instance):
495 value = super(GAEDecimalProperty, self)\ 496 .get_value_for_datastore(model_instance) 497 if value is None or value == '': 498 return None 499 else: 500 return str(value)
501
502 - def make_value_from_datastore(self, value):
503 if value is None or value == '': 504 return None 505 else: 506 return decimal.Decimal(value).quantize(self.round)
507
508 - def validate(self, value):
509 value = super(GAEDecimalProperty, self).validate(value) 510 if value is None or isinstance(value, decimal.Decimal): 511 return value 512 elif isinstance(value, basestring): 513 return decimal.Decimal(value) 514 raise gae.BadValueError("Property %s must be a Decimal or string."\ 515 % self.name)
516
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):
    """Mixin that gives adapters per-URI connection pooling.

    Pools are shared process-wide in POOLS (keyed by connection URI) and
    guarded by GLOBAL_LOCKER; per-thread state lives in THREAD_LOCAL.
    """

    # uri -> list of idle DB-API connections, shared across threads
    POOLS = {}
    # when True, a pooled connection is probed with 'SELECT 1;' before reuse
    check_active_connection = True

    @staticmethod
    def set_folder(folder):
        # remember the working folder for the current thread
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    def close(self,action='commit',really=True):
        """Finish the unit of work (commit/rollback or custom callable),
        then either return the connection to the pool or really close it."""
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                # connection went back to the pool: do not close it
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        # adapter no longer owns a connection either way
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        # Creating the folder if it does not exist
        # NOTE: deliberately disabled by the `False and` guard below
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)

    def after_connection_hook(self):
        """hook for the after_connection parameter"""
        if callable(self._after_connection):
            self._after_connection(self)
        self.after_connection()

    def after_connection(self):
        """ this it is supposed to be overloaded by adapters"""
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            # pooling disabled: always open a fresh connection
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse an idle pooled connection, but verify it is
                    # still alive before accepting it
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        # dead connection: drop it and loop for another
                        pass
                else:
                    # pool empty: open a new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):
    """Generic SQL adapter; concrete adapters override attributes/methods."""

    native_json = False
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    maxcharlength = MAXCHARLENGTH
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True

    # SQL literals used to store boolean values
    TRUE = 'T'
    FALSE = 'F'
    # map of DAL field types to SQL column type templates; %(...)s slots are
    # filled per-field in create_table()
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float':'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
667 - def id_query(self, table):
668 return table._id != None
669
670 - def adapt(self, obj):
671 return "'%s'" % obj.replace("'", "''")
672
673 - def smart_adapt(self, obj):
674 if isinstance(obj,(int,float)): 675 return str(obj) 676 return self.adapt(str(obj))
677
678 - def integrity_error(self):
679 return self.driver.IntegrityError
680
681 - def operational_error(self):
682 return self.driver.OperationalError
683
    def file_exists(self, filename):
        """Return True if *filename* exists (os.path.exists via the
        module-level `exists` alias).

        to be used ONLY for files that on GAE may not be on filesystem
        """
        return exists(filename)
689
    def file_open(self, filename, mode='rb', lock=True):
        """Open *filename*; when portalocker is available and *lock* is
        True, return a LockedFile holding an advisory lock instead.

        to be used ONLY for files that on GAE may not be on filesystem
        """
        if have_portalocker and lock:
            fileobj = portalocker.LockedFile(filename,mode)
        else:
            fileobj = open(filename,mode)
        return fileobj
699
700 - def file_close(self, fileobj):
701 """ 702 to be used ONLY for files that on GAE may not be on filesystem 703 """ 704 if fileobj: 705 fileobj.close()
706
707 - def file_delete(self, filename):
708 os.unlink(filename)
709
    def find_driver(self,adapter_args,uri=None):
        """Pick and bind the DB-API driver module for this adapter.

        Selection order: an explicit driver requested in the URI scheme
        (e.g. 'postgres:psycopg2://...'), then adapter_args['driver'],
        then the first entry of self.drivers that was importable.
        Sets self.driver_name / self.driver; raises RuntimeError when the
        requested driver (or any driver at all) is unavailable.
        """
        # already bound (e.g. by a subclass or a previous call): keep it
        if getattr(self,'driver',None) != None:
            return
        # drivers that were successfully imported at module load time
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            # 'engine:driver://...' -> ['engine', 'driver']
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
732 733
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True, after_connection=None):
        """Base initializer: records connection parameters and installs a
        no-op Dummy connection/cursor.

        Real adapters override this to actually connect; the unused
        parameters (credential_decoder, driver_args, adapter_args,
        do_connect) are part of the common adapter signature.
        NOTE(review): driver_args/adapter_args are mutable defaults —
        harmless here since they are never mutated, but fragile.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # placeholder connection: every attribute is a callable returning [],
        # so SQL-less usage (e.g. DAL(None)) does not crash
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
751 - def sequence_name(self,tablename):
752 return '%s_sequence' % tablename
753
754 - def trigger_name(self,tablename):
755 return '%s_sequence' % tablename
756
757 - def varquote(self,name):
758 return name
759
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and, if migrating, execute) the CREATE TABLE statement
        for *table*.

        Returns the SQL string. When migrate is truthy, also maintains the
        per-table metadata file (<hash>_<table>.table, pickled field specs)
        and the sql.log file, and calls migrate_table() when the stored
        metadata differs from the current definition. `polymodel` is only
        used by the GAE adapter (ignored here).
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # field specs used for migration comparison (includes defaults info)
        sql_fields = {}
        # field specs used for actual CREATE TABLE (may embed DEFAULT)
        sql_fields_aux = {}
        # table-level (multi-column) foreign keys: rtablename -> {rfield: field}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                # 'reference <table>[.<field>]'; '.' means self-reference
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                    constraint_name = constraint_name, # should be quoted
                                    foreign_key = '%s (%s)' % (rtablename,
                                                               rfieldname),
                                    table_name = tablename,
                                    field_name = field_name,
                                    on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                # 'decimal(p,s)' -> precision/scale
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    # executed after CREATE TABLE, not part of it
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                     % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                        field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level (multi-column) foreign key constraints
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                table_name = tablename,
                field_name=', '.join(fkeys),
                foreign_table = rtablename,
                foreign_key = ', '.join(pkeys),
                on_delete_action = field.ondelete)

        if hasattr(table,'_primarykey'):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # decide where the migration metadata files live
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            # in-memory databases keep no migration metadata
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if table._dbt:
            table._loggername = pjoin(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first run (or no metadata): create the table for real
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # metadata exists: compare and migrate if the schema changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
993
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """Diff the current field metadata against the pickled metadata of the
        last migration and issue the ALTER TABLE statements needed to bring the
        backend table in sync.

        table           -- the Table being migrated
        sql_fields      -- current metadata: {fieldname: dict(sql=..., type=...)}
        sql_fields_old  -- metadata unpickled from the .table file
        sql_fields_aux  -- like sql_fields but with backend-specific SQL used
                           for the actual ALTER statements
        logfile         -- open sql.log file object; statements are appended
        fake_migrate    -- when True, only the metadata file is updated; no SQL
                           is executed

        Side effects: executes/commits DDL, rewrites table._dbt (the pickled
        metadata) after each step, and appends to logfile.
        """
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        def fix(item):
            # Normalize legacy metadata entries that stored a bare SQL string
            # instead of a dict.
            # NOTE(review): 'unkown' (sic) is a persisted metadata value --
            # do not correct the spelling or old .table files stop matching.
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # migrations because of case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        # union of new and old field names, preserving order of the new ones
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        new_add = self.concat_add(tablename)

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # field added since last migration
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                   sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                             (tablename, key,
                              sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                # sqlite cannot DROP/ALTER columns: only record the metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # field removed since last migration
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) \
                   and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;'
                             % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not (key in table.fields and
                           isinstance(table[key].type, SQLCustomType)) \
                  and not sql_fields[key]['type'].startswith('reference')\
                  and not sql_fields[key]['type'].startswith('double')\
                  and not sql_fields[key]['type'].startswith('id'):
                # field type changed: copy data through a __tmp column so the
                # values survive the DROP/ADD round trip
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed (SQL identical): metadata only
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            pickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
           not (query and self.dbengine in ('mysql','oracle','firebird')):
            # engines above already committed per-statement in the loop
            db.commit()
            tfile = self.file_open(table._dbt, 'w')
            pickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
1117 - def LOWER(self, first):
1118 return 'LOWER(%s)' % self.expand(first)
1119
1120 - def UPPER(self, first):
1121 return 'UPPER(%s)' % self.expand(first)
1122
1123 - def COUNT(self, first, distinct=None):
1124 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1125 % self.expand(first)
1126
1127 - def EXTRACT(self, first, what):
1128 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1129
1130 - def EPOCH(self, first):
1131 return self.EXTRACT(first, 'epoch')
1132
1133 - def AGGREGATE(self, first, what):
1134 return "%s(%s)" % (what, self.expand(first))
1135
1136 - def JOIN(self):
1137 return 'JOIN'
1138
1139 - def LEFT_JOIN(self):
1140 return 'LEFT JOIN'
1141
1142 - def RANDOM(self):
1143 return 'Random()'
1144
1145 - def NOT_NULL(self, default, field_type):
1146 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1147
1148 - def COALESCE(self, first, second):
1149 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1150 return 'COALESCE(%s)' % ','.join(expressions)
1151
1152 - def COALESCE_ZERO(self, first):
1153 return 'COALESCE(%s,0)' % self.expand(first)
1154
1155 - def RAW(self, first):
1156 return first
1157
1158 - def ALLOW_NULL(self):
1159 return ''
1160
1161 - def SUBSTRING(self, field, parameters):
1162 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1163
1164 - def PRIMARY_KEY(self, key):
1165 return 'PRIMARY KEY(%s)' % key
1166
1167 - def _drop(self, table, mode):
1168 return ['DROP TABLE %s;' % table]
1169
    def drop(self, table, mode=''):
        """DROP *table* on the backend and purge it from the DAL instance.

        When migrations are enabled (table._dbt set) the statements are
        appended to the table's sql.log and the .table migration file is
        deleted afterwards.
        """
        db = table._db
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        db.commit()
        # remove the table object and all references to it from the DAL
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            self.file_delete(table._dbt)
            logfile.write('success!\n')
1186
1187 - def _insert(self, table, fields):
1188 if fields: 1189 keys = ','.join(f.name for f, v in fields) 1190 values = ','.join(self.expand(v, f.type) for f, v in fields) 1191 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1192 else: 1193 return self._insert_empty(table)
1194
1195 - def _insert_empty(self, table):
1196 return 'INSERT INTO %s DEFAULT VALUES;' % table
1197
    def insert(self, table, fields):
        """Execute an INSERT and return the new record id.

        Returns None when the backend raises its integrity-error class
        (e.g. duplicate key); for keyed tables returns a dict of the
        primary-key values; otherwise wraps the backend's lastrowid in a
        lazy Reference (non-integer ids are returned as-is).
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            # backends without integer row ids return the raw value
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
1216
1217 - def bulk_insert(self, table, items):
1218 return [self.insert(table,item) for item in items]
1219
1220 - def NOT(self, first):
1221 return '(NOT %s)' % self.expand(first)
1222
1223 - def AND(self, first, second):
1224 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1225
1226 - def OR(self, first, second):
1227 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1228
1229 - def BELONGS(self, first, second):
1230 if isinstance(second, str): 1231 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1232 elif not second: 1233 return '(1=0)' 1234 items = ','.join(self.expand(item, first.type) for item in second) 1235 return '(%s IN (%s))' % (self.expand(first), items)
1236
1237 - def REGEXP(self, first, second):
1238 "regular expression operator" 1239 raise NotImplementedError
1240
1241 - def LIKE(self, first, second):
1242 "case sensitive like operator" 1243 raise NotImplementedError
1244
1245 - def ILIKE(self, first, second):
1246 "case in-sensitive like operator" 1247 return '(%s LIKE %s)' % (self.expand(first), 1248 self.expand(second, 'string'))
1249
1250 - def STARTSWITH(self, first, second):
1251 return '(%s LIKE %s)' % (self.expand(first), 1252 self.expand(second+'%', 'string'))
1253
1254 - def ENDSWITH(self, first, second):
1255 return '(%s LIKE %s)' % (self.expand(first), 1256 self.expand('%'+second, 'string'))
1257
1258 - def CONTAINS(self, first, second, case_sensitive=False):
1259 if isinstance(second,Expression): 1260 field = self.expand(first) 1261 expr = self.expand(second,'string') 1262 if first.type.startswith('list:'): 1263 expr = 'CONCAT("|", %s, "|")' % expr 1264 elif not first.type in ('string', 'text', 'json'): 1265 raise RuntimeError("Expression Not Supported") 1266 return 'INSTR(%s,%s)' % (field, expr) 1267 else: 1268 if first.type in ('string', 'text', 'json'): 1269 key = '%'+str(second).replace('%','%%')+'%' 1270 elif first.type.startswith('list:'): 1271 key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' 1272 else: 1273 raise RuntimeError("Expression Not Supported") 1274 op = case_sensitive and self.LIKE or self.ILIKE 1275 return op(first,key)
1276
1277 - def EQ(self, first, second=None):
1278 if second is None: 1279 return '(%s IS NULL)' % self.expand(first) 1280 return '(%s = %s)' % (self.expand(first), 1281 self.expand(second, first.type))
1282
1283 - def NE(self, first, second=None):
1284 if second is None: 1285 return '(%s IS NOT NULL)' % self.expand(first) 1286 return '(%s <> %s)' % (self.expand(first), 1287 self.expand(second, first.type))
1288
1289 - def LT(self,first,second=None):
1290 if second is None: 1291 raise RuntimeError("Cannot compare %s < None" % first) 1292 return '(%s < %s)' % (self.expand(first), 1293 self.expand(second,first.type))
1294
1295 - def LE(self,first,second=None):
1296 if second is None: 1297 raise RuntimeError("Cannot compare %s <= None" % first) 1298 return '(%s <= %s)' % (self.expand(first), 1299 self.expand(second,first.type))
1300
1301 - def GT(self,first,second=None):
1302 if second is None: 1303 raise RuntimeError("Cannot compare %s > None" % first) 1304 return '(%s > %s)' % (self.expand(first), 1305 self.expand(second,first.type))
1306
1307 - def GE(self,first,second=None):
1308 if second is None: 1309 raise RuntimeError("Cannot compare %s >= None" % first) 1310 return '(%s >= %s)' % (self.expand(first), 1311 self.expand(second,first.type))
1312
1313 - def ADD(self, first, second):
1314 return '(%s + %s)' % (self.expand(first), 1315 self.expand(second, first.type))
1316
1317 - def SUB(self, first, second):
1318 return '(%s - %s)' % (self.expand(first), 1319 self.expand(second, first.type))
1320
1321 - def MUL(self, first, second):
1322 return '(%s * %s)' % (self.expand(first), 1323 self.expand(second, first.type))
1324
1325 - def DIV(self, first, second):
1326 return '(%s / %s)' % (self.expand(first), 1327 self.expand(second, first.type))
1328
1329 - def MOD(self, first, second):
1330 return '(%s %% %s)' % (self.expand(first), 1331 self.expand(second, first.type))
1332
1333 - def AS(self, first, second):
1334 return '%s AS %s' % (self.expand(first), second)
1335
1336 - def ON(self, first, second):
1337 if use_common_filters(second): 1338 second = self.common_filter(second,[first._tablename]) 1339 return '%s ON %s' % (self.expand(first), self.expand(second))
1340
1341 - def INVERT(self, first):
1342 return '%s DESC' % self.expand(first)
1343
1344 - def COMMA(self, first, second):
1345 return '%s, %s' % (self.expand(first), self.expand(second))
1346
    def expand(self, expression, field_type=None):
        """Recursively render a Field/Expression/Query/literal as SQL text.

        Operator methods (EQ, AND, ...) are stored in expression.op and
        called back with the operands; bare literals are rendered through
        self.represent when a *field_type* is given.
        """
        if isinstance(expression, Field):
            return '%s.%s' % (expression.tablename, expression.name)
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            if not second is None:
                return op(first, second, **optional_args)
            elif not first is None:
                return op(first,**optional_args)
            elif isinstance(op, str):
                # raw SQL fragment stored as the operator itself
                if op.endswith(';'):
                    op=op[:-1]
                return '(%s)' % op
            else:
                # nullary operator, e.g. RANDOM
                return op()
        elif field_type:
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1374
    def alias(self, table, alias):
        """
        Given a table object, makes a new table object
        with alias name.

        The alias is a shallow copy: each Field is re-copied and re-pointed
        at the alias so queries expand with the alias name, and the alias is
        registered on the DAL instance under its new name.
        """
        other = copy.copy(table)
        other['_ot'] = other._tablename   # remember the original table name
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        table._db[alias] = other
        return other
1391
1392 - def _truncate(self, table, mode=''):
1393 tablename = table._tablename 1394 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1395
    def truncate(self, table, mode= ' '):
        """Execute the adapter's TRUNCATE statements for *table*, logging to
        the table's sql.log when migrations are enabled.

        NOTE(review): the default *mode* is a single space that is appended
        verbatim to the statement by _truncate -- confirm intended.
        """
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            # no migration log: swallow writes with a null logger
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()

        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()
1418 - def _update(self, tablename, query, fields):
1419 if query: 1420 if use_common_filters(query): 1421 query = self.common_filter(query, [tablename]) 1422 sql_w = ' WHERE ' + self.expand(query) 1423 else: 1424 sql_w = '' 1425 sql_v = ','.join(['%s=%s' % (field.name, 1426 self.expand(value, field.type)) \ 1427 for (field, value) in fields]) 1428 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1429
1430 - def update(self, tablename, query, fields):
1431 sql = self._update(tablename, query, fields) 1432 self.execute(sql) 1433 try: 1434 return self.cursor.rowcount 1435 except: 1436 return None
1437
1438 - def _delete(self, tablename, query):
1439 if query: 1440 if use_common_filters(query): 1441 query = self.common_filter(query, [tablename]) 1442 sql_w = ' WHERE ' + self.expand(query) 1443 else: 1444 sql_w = '' 1445 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1446
    def delete(self, tablename, query):
        """Execute a DELETE; return the affected row count (None if the
        driver has no rowcount).

        SQLite/SpatiaLite do not enforce ON DELETE CASCADE, so the ids to be
        deleted are collected first and referencing rows with
        ondelete=='CASCADE' are deleted manually afterwards.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter
1468
1469 - def get_table(self, query):
1470 tablenames = self.tables(query) 1471 if len(tablenames)==1: 1472 return tablenames[0] 1473 elif len(tablenames)<1: 1474 raise RuntimeError("No table selected") 1475 else: 1476 raise RuntimeError("Too many tables selected")
1477
    def expand_all(self, fields, tablenames):
        """Normalize a select field list: SQLALL becomes every field of its
        table, 'table.field' strings become Field objects, other strings
        become raw Expressions; an empty list selects all fields of
        *tablenames*.
        """
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # arbitrary SQL snippet: wrap as a lazy raw expression
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1499
    def _select(self, query, fields, attributes):
        """Build the full SQL SELECT for *query*/*fields*.

        *attributes* may carry: left, join, distinct, groupby, orderby,
        having, limitby, for_update (validated against SELECT_ARGS).
        Also records the expanded column names in self._colnames for the
        row parser.
        """
        tables = self.tables
        # reject unknown select attributes early
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        # collect the tables referenced by the selected fields too
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        def geoexpand(field):
            # geometry columns are selected as WKT text
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            # split inner joins into plain tables (ijoint) and ON
            # expressions (ijoinon); tables named inside ON clauses must
            # not appear again in the FROM list
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            # same split for LEFT JOINs
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,excluded)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        def alias(t):
            return str(self.db[t])
        # assemble the FROM clause depending on which join kinds are present
        if inner_join and not left:
            sql_t = ', '.join([alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            if not orderby and tablenames:
                # limitby without orderby: force a deterministic ordering
                # on the primary keys so paging is stable
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
            # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1631 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1632 if limitby: 1633 (lmin, lmax) = limitby 1634 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1635 return 'SELECT %s %s FROM %s%s%s;' % \ 1636 (sql_s, sql_f, sql_t, sql_w, sql_o)
1637
1638 - def _fetchall(self):
1639 return self.cursor.fetchall()
1640
    def _select_aux(self,sql,fields,attributes):
        """Execute *sql* (optionally through the cache model), slice the raw
        rows per limitby, and run them through the row processor."""
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            # long keys are hashed to keep the cache backend happy
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        # only the offset matters here; the upper bound was applied in SQL
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        With cache+cacheable the fully processed Rows object is cached;
        otherwise only _select_aux may cache the raw rows.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1681
1682 - def _count(self, query, distinct=None):
1683 tablenames = self.tables(query) 1684 if query: 1685 if use_common_filters(query): 1686 query = self.common_filter(query, tablenames) 1687 sql_w = ' WHERE ' + self.expand(query) 1688 else: 1689 sql_w = '' 1690 sql_t = ','.join(tablenames) 1691 if distinct: 1692 if isinstance(distinct,(list, tuple)): 1693 distinct = xorify(distinct) 1694 sql_d = self.expand(distinct) 1695 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1696 (sql_d, sql_t, sql_w) 1697 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1698
1699 - def count(self, query, distinct=None):
1700 self.execute(self._count(query, distinct)) 1701 return self.cursor.fetchone()[0]
1702
1703 - def tables(self, *queries):
1704 tables = set() 1705 for query in queries: 1706 if isinstance(query, Field): 1707 tables.add(query.tablename) 1708 elif isinstance(query, (Expression, Query)): 1709 if not query.first is None: 1710 tables = tables.union(self.tables(query.first)) 1711 if not query.second is None: 1712 tables = tables.union(self.tables(query.second)) 1713 return list(tables)
1714
1715 - def commit(self):
1716 if self.connection: return self.connection.commit()
1717
1718 - def rollback(self):
1719 if self.connection: return self.connection.rollback()
1720
1721 - def close_connection(self):
1722 if self.connection: return self.connection.close()
1723
1724 - def distributed_transaction_begin(self, key):
1725 return
1726
1727 - def prepare(self, key):
1728 if self.connection: self.connection.prepare()
1729
1730 - def commit_prepared(self, key):
1731 if self.connection: self.connection.commit()
1732
1733 - def rollback_prepared(self, key):
1734 if self.connection: self.connection.rollback()
1735
1736 - def concat_add(self, tablename):
1737 return ', ADD '
1738
1739 - def constraint_name(self, table, fieldname):
1740 return '%s_%s__constraint' % (table,fieldname)
1741
1742 - def create_sequence_and_triggers(self, query, table, **args):
1743 self.execute(query)
1744
1745 - def log_execute(self, *a, **b):
1746 if not self.connection: return None 1747 command = a[0] 1748 if self.db._debug: 1749 LOGGER.debug('SQL: %s' % command) 1750 self.db._lastsql = command 1751 t0 = time.time() 1752 ret = self.cursor.execute(*a, **b) 1753 self.db._timings.append((command,time.time()-t0)) 1754 del self.db._timings[:-TIMINGSSIZE] 1755 return ret
1756
1757 - def execute(self, *a, **b):
1758 return self.log_execute(*a, **b)
1759
1760 - def represent(self, obj, fieldtype):
1761 field_is_type = fieldtype.startswith 1762 if isinstance(obj, CALLABLETYPES): 1763 obj = obj() 1764 if isinstance(fieldtype, SQLCustomType): 1765 value = fieldtype.encoder(obj) 1766 if fieldtype.type in ('string','text', 'json'): 1767 return self.adapt(value) 1768 return value 1769 if isinstance(obj, (Expression, Field)): 1770 return str(obj) 1771 if field_is_type('list:'): 1772 if not obj: 1773 obj = [] 1774 elif not isinstance(obj, (list, tuple)): 1775 obj = [obj] 1776 if field_is_type('list:string'): 1777 obj = map(str,obj) 1778 else: 1779 obj = map(int,obj) 1780 # we don't want to bar_encode json objects 1781 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1782 obj = bar_encode(obj) 1783 if obj is None: 1784 return 'NULL' 1785 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1786 return 'NULL' 1787 r = self.represent_exceptions(obj, fieldtype) 1788 if not r is None: 1789 return r 1790 if fieldtype == 'boolean': 1791 if obj and not str(obj)[:1].upper() in '0F': 1792 return self.smart_adapt(self.TRUE) 1793 else: 1794 return self.smart_adapt(self.FALSE) 1795 if fieldtype == 'id' or fieldtype == 'integer': 1796 return str(int(obj)) 1797 if field_is_type('decimal'): 1798 return str(obj) 1799 elif field_is_type('reference'): # reference 1800 if fieldtype.find('.')>0: 1801 return repr(obj) 1802 elif isinstance(obj, (Row, Reference)): 1803 return str(obj['id']) 1804 return str(int(obj)) 1805 elif fieldtype == 'double': 1806 return repr(float(obj)) 1807 if isinstance(obj, unicode): 1808 obj = obj.encode(self.db_codec) 1809 if fieldtype == 'blob': 1810 obj = base64.b64encode(str(obj)) 1811 elif fieldtype == 'date': 1812 if isinstance(obj, (datetime.date, datetime.datetime)): 1813 obj = obj.isoformat()[:10] 1814 else: 1815 obj = str(obj) 1816 elif fieldtype == 'datetime': 1817 if isinstance(obj, datetime.datetime): 1818 obj = obj.isoformat()[:19].replace('T',' ') 1819 elif isinstance(obj, datetime.date): 1820 obj = 
obj.isoformat()[:10]+' 00:00:00' 1821 else: 1822 obj = str(obj) 1823 elif fieldtype == 'time': 1824 if isinstance(obj, datetime.time): 1825 obj = obj.isoformat()[:10] 1826 else: 1827 obj = str(obj) 1828 elif fieldtype == 'json': 1829 if not self.native_json: 1830 if have_serializers: 1831 obj = serializers.json(obj) 1832 elif simplejson: 1833 obj = simplejson.dumps(items) 1834 else: 1835 raise RuntimeError("missing simplejson") 1836 if not isinstance(obj,bytes): 1837 obj = bytes(obj) 1838 try: 1839 obj.decode(self.db_codec) 1840 except: 1841 obj = obj.decode('latin1').encode(self.db_codec) 1842 return self.adapt(obj)
1843
1844 - def represent_exceptions(self, obj, fieldtype):
1845 return None
1846
1847 - def lastrowid(self, table):
1848 return None
1849
1850 - def integrity_error_class(self):
1851 return type(None)
1852
1853 - def rowslice(self, rows, minimum=0, maximum=None):
1854 """ 1855 By default this function does nothing; 1856 overload when db does not do slicing. 1857 """ 1858 return rows
1859
    def parse_value(self, value, field_type, blob_decode=True):
        """Convert a raw DB value to its Python type via self.parsemap.

        Strings are first decoded from the DB codec and re-encoded utf-8
        (Py2); string-like, geo and undecoded-blob types pass through
        unchanged.
        """
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # dispatch on the type prefix, e.g. 'reference person' -> 'reference'
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1881
1882 - def parse_reference(self, value, field_type):
1883 referee = field_type[10:].strip() 1884 if not '.' in referee: 1885 value = Reference(value) 1886 value._table, value._record = self.db[referee], None 1887 return value
1888
1889 - def parse_boolean(self, value, field_type):
1890 return value == True or str(value)[:1].lower() == 't'
1891
1892 - def parse_date(self, value, field_type):
1893 if isinstance(value, datetime.datetime): 1894 return value.date() 1895 if not isinstance(value, (datetime.date,datetime.datetime)): 1896 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1897 value = datetime.date(y, m, d) 1898 return value
1899
1900 - def parse_time(self, value, field_type):
1901 if not isinstance(value, datetime.time): 1902 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1903 if len(time_items) == 3: 1904 (h, mi, s) = time_items 1905 else: 1906 (h, mi, s) = time_items + [0] 1907 value = datetime.time(h, mi, s) 1908 return value
1909
1910 - def parse_datetime(self, value, field_type):
1911 if not isinstance(value, datetime.datetime): 1912 value = str(value) 1913 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 1914 if '+' in timezone: 1915 ms,tz = timezone.split('+') 1916 h,m = tz.split(':') 1917 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1918 elif '-' in timezone: 1919 ms,tz = timezone.split('-') 1920 h,m = tz.split(':') 1921 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1922 else: 1923 dt = None 1924 (y, m, d) = map(int,date_part.split('-')) 1925 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 1926 while len(time_parts)<3: time_parts.append(0) 1927 time_items = map(int,time_parts) 1928 (h, mi, s) = time_items 1929 value = datetime.datetime(y, m, d, h, mi, s) 1930 if dt: 1931 value = value + dt 1932 return value
1933
def parse_blob(self, value, field_type):
    """Decode a base64-encoded blob column back to its raw bytes."""
    encoded = str(value)
    return base64.b64decode(encoded)
1936
def parse_decimal(self, value, field_type):
    """Convert a column value into decimal.Decimal at the scale declared
    in `field_type` (e.g. 'decimal(10,2)')."""
    scale = int(field_type[8:-1].split(',')[-1])
    if self.dbengine in ('sqlite', 'spatialite'):
        # sqlite stores floats: round to the declared scale first
        value = ('%%.%df' % scale) % value
    if isinstance(value, decimal.Decimal):
        return value
    return decimal.Decimal(str(value))
1944
def parse_list_integers(self, value, field_type):
    """Decode a |-separated integer list, except on the datastore
    backend which already returns a native list."""
    if self.dbengine == 'google:datastore':
        return value
    return bar_decode_integer(value)
1949
def parse_list_references(self, value, field_type):
    """Decode a list of reference ids and wrap each as a Reference."""
    if self.dbengine != 'google:datastore':
        value = bar_decode_integer(value)
    # strip the 'list:' prefix to get the element type 'reference <table>'
    element_type = field_type[5:]
    return [self.parse_reference(item, element_type) for item in value]
1954
def parse_list_strings(self, value, field_type):
    """Decode a |-separated string list, except on the datastore
    backend which already returns a native list."""
    if self.dbengine == 'google:datastore':
        return value
    return bar_decode_string(value)
1959
def parse_id(self, value, field_type):
    """Coerce an 'id' column value to int."""
    return int(value)
1962
def parse_integer(self, value, field_type):
    """Coerce an integer/bigint column value to int."""
    return int(value)
1965
def parse_double(self, value, field_type):
    """Coerce a float/double column value to float."""
    return float(value)
1968
def parse_json(self, value, field_type):
    """Deserialize a json column, unless the backend returns native JSON
    (self.native_json), in which case the value passes through."""
    if self.native_json:
        return value
    if not isinstance(value, basestring):
        raise RuntimeError('json data not a string')
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    # prefer the gluon serializers when available, else simplejson
    if have_serializers:
        return serializers.loads_json(value)
    if simplejson:
        return simplejson.loads(value)
    raise RuntimeError("missing simplejson")
1982
def build_parsemap(self):
    """Populate self.parsemap: field-type key -> parse_* handler."""
    string_keyed = {
        'list:integer': self.parse_list_integers,
        'list:reference': self.parse_list_references,
        'list:string': self.parse_list_strings,
    }
    self.parsemap = dict(
        id=self.parse_id,
        integer=self.parse_integer,
        bigint=self.parse_integer,
        float=self.parse_double,
        double=self.parse_double,
        reference=self.parse_reference,
        boolean=self.parse_boolean,
        date=self.parse_date,
        time=self.parse_time,
        datetime=self.parse_datetime,
        blob=self.parse_blob,
        decimal=self.parse_decimal,
        json=self.parse_json,
        **string_keyed)
2002
def parse(self, rows, fields, colnames, blob_decode=True,
          cacheable = False):
    """Convert raw DB rows into a Rows object of nested Row records.

    rows      -- raw tuples from the cursor
    fields    -- Field objects matching colnames positionally
    colnames  -- 'table.field' strings (or arbitrary expressions)
    blob_decode -- whether blob columns are base64-decoded
    cacheable -- when True, skip attaching update/delete helpers
                 (lighter, pickleable rows)
    """
    self.build_parsemap()
    db = self.db
    virtualtables = []
    new_rows = []
    # precompute (tablename, fieldname, table, field, type) per column;
    # None marks expression columns that go into row['_extra']
    tmps = []
    for colname in colnames:
        if not REGEX_TABLE_DOT_FIELD.match(colname):
            tmps.append(None)
        else:
            (tablename, fieldname) = colname.split('.')
            table = db[tablename]
            field = table[fieldname]
            ft = field.type
            tmps.append((tablename,fieldname,table,field,ft))
    for (i,row) in enumerate(rows):
        new_row = Row()
        for (j,colname) in enumerate(colnames):
            value = row[j]
            tmp = tmps[j]
            if tmp:
                (tablename,fieldname,table,field,ft) = tmp
                # one sub-Row per table; created on first column seen
                if tablename in new_row:
                    colset = new_row[tablename]
                else:
                    colset = new_row[tablename] = Row()
                    if tablename not in virtualtables:
                        virtualtables.append(tablename)
                value = self.parse_value(value,ft,blob_decode)
                if field.filter_out:
                    value = field.filter_out(value)
                colset[fieldname] = value

                # for backward compatibility
                if ft=='id' and fieldname!='id' and \
                        not 'id' in table.fields:
                    colset['id'] = value

                if ft == 'id' and not cacheable:
                    # temporary hack to deal with
                    # GoogleDatastoreAdapter
                    # references
                    if isinstance(self, GoogleDatastoreAdapter):
                        # NOTE: 'id' shadows the builtin here (kept as-is)
                        id = value.key().id_or_name()
                        colset[fieldname] = id
                        colset.gae_item = value
                    else:
                        id = value
                    # attach mutation helpers bound to this record
                    colset.update_record = RecordUpdater(colset,table,id)
                    colset.delete_record = RecordDeleter(table,id)
                    # expose back-references as lazy sets
                    for rfield in table._referenced_by:
                        referee_link = db._referee_name and \
                            db._referee_name % dict(
                            table=rfield.tablename,field=rfield.name)
                        if referee_link and not referee_link in colset:
                            colset[referee_link] = LazySet(rfield,id)
            else:
                # expression column: stash raw+parsed under '_extra'
                if not '_extra' in new_row:
                    new_row['_extra'] = Row()
                new_row['_extra'][colname] = \
                    self.parse_value(value,
                                     fields[j].type,blob_decode)
                # also expose 'expr AS name' under its alias
                new_column_name = \
                    REGEX_SELECT_AS_PARSER.search(colname)
                if not new_column_name is None:
                    column_name = new_column_name.groups(0)
                    setattr(new_row,column_name[0],value)
        new_rows.append(new_row)
    rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

    for tablename in virtualtables:
        ### new style virtual fields
        table = db[tablename]
        fields_virtual = [(f,v) for (f,v) in table.iteritems()
                          if isinstance(v,FieldVirtual)]
        fields_lazy = [(f,v) for (f,v) in table.iteritems()
                       if isinstance(v,FieldMethod)]
        if fields_virtual or fields_lazy:
            for row in rowsobj.records:
                box = row[tablename]
                for f,v in fields_virtual:
                    # computed eagerly per row
                    box[f] = v.f(row)
                for f,v in fields_lazy:
                    # wrapped so evaluation happens on access
                    box[f] = (v.handler or VirtualCommand)(v.f,row)

        ### old style virtual fields
        for item in table.virtualfields:
            try:
                rowsobj = rowsobj.setvirtualfields(**{tablename:item})
            except (KeyError, AttributeError):
                # to avoid breaking virtualfields when partial select
                pass
    return rowsobj
2097
def common_filter(self, query, tablenames):
    """AND the query with each table's common filter and, when a tenant
    field with a default exists, with the multi-tenant restriction."""
    tenant_fieldname = self.db._request_tenant

    for tablename in tablenames:
        table = self.db[tablename]

        # user-provided per-table filter
        if table._common_filter != None:
            query = query & table._common_filter(query)

        # multi-tenant restriction keyed on the tenant field's default
        if tenant_fieldname in table:
            default = table[tenant_fieldname].default
            if default is not None:
                newquery = table[tenant_fieldname] == default
                query = newquery if query is None else query & newquery
    return query
2118
def CASE(self, query, t, f):
    """Build a SQL 'CASE WHEN <query> THEN <t> ELSE <f> END' Expression."""
    def sql_literal(x):
        # literal NULL, raw sub-expression, or a represented constant
        type_names = {type(True): 'boolean', type(0): 'integer',
                      type(1.0): 'double'}
        if x is None:
            return 'NULL'
        if isinstance(x, Expression):
            return str(x)
        return self.represent(x, type_names.get(type(x), 'string'))
    sql = 'CASE WHEN %s THEN %s ELSE %s END' % (
        self.expand(query), sql_literal(t), sql_literal(f))
    return Expression(self.db, sql)
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    # Adapter for SQLite databases (sqlite2/sqlite3 drivers).
    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        """SQL to extract a date/time part via the registered UDF."""
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """UDF: pull a component out of an ISO 'YYYY-MM-DD HH:MM:SS' string.

        lookup is year/month/day/hour/minute/second, or 'epoch' for
        seconds since the epoch; returns None on any parse failure.
        """
        # (start, end) slice positions inside 'YYYY-MM-DD HH:MM:SS'
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            # malformed input: behave like SQL NULL
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """UDF backing the SQL REGEXP operator."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Open the SQLite file named in `uri` (or ':memory:')."""
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # sqlite connections are never pooled, regardless of pool_size
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                # relative path: resolve against the working folder
                if PYTHON_VERSION == 2:
                    dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), dbpath)
                else:
                    dbpath = pjoin(self.folder, dbpath)
        if not 'check_same_thread' in driver_args:
            # web2py serializes DB access itself
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT and the REGEXP operator
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # sqlite has no TRUNCATE; DELETE rows and reset the rowid counter
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
2223
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter extended with SpatiaLite geometry support."""
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        """Open the SpatiaLite database named in `uri` (or ':memory:').

        srid -- spatial reference id used for geometry columns.
        """
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # sqlite connections are never pooled
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                # relative path: resolve against the working folder
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        if not 'check_same_thread' in driver_args:
            # web2py serializes DB access itself
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Load the SpatiaLite extension and register the sqlite UDFs."""
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        # BUGFIX: the library name must be interpolated into the SQL
        # before executing; previously the '%' operator was applied to
        # execute()'s return value, sending a literal '%s' to the DB.
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
            second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geo values as GeomFromText literals; defer otherwise."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            # if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            # elif field_is_type('geography'):
            #     value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2331
class JDBCSQLiteAdapter(SQLiteAdapter):
    # SQLite over zxJDBC (Jython environments).
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Open the SQLite database named in `uri` through JDBC."""
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                # relative path: resolve against the working folder
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connector(dbpath=dbpath,driver_args=driver_args):
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        # no command-tail processing needed over JDBC; just log and run
        return self.log_execute(a)
2371
class MySQLAdapter(BaseAdapter):
    # Adapter for MySQL via MySQLdb or pymysql.
    drivers = ('MySQLdb','pymysql')

    # longest VARCHAR used for string fields
    maxcharlength = 255
    commit_on_alter_table = True
    # XA distributed transactions are supported
    support_distributed_transaction = True
    # mapping of DAL field types to MySQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # quote identifiers with backticks when needed
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,ley):
        # NOTE(review): parameter name 'ley' looks like a typo for 'key';
        # kept as-is to avoid changing the public signature -- confirm
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    # mysql://user:password@host:port/db?set_encoding=charset
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the mysql:// URI and build the connection factory."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)


        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # enforce referential integrity and standard (non-backslash) escaping
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2492
class PostgreSQLAdapter(BaseAdapter):
    # Adapter for PostgreSQL via psycopg2 or pg8000.
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    # mapping of DAL field types to PostgreSQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # double-quote identifiers when needed
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        """Quote/escape a literal value according to the active driver."""
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            # pg8000 uses %-style parameters, so '%' must be doubled
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '||' concatenates text-like types; '+' adds numerics
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    def distributed_transaction_begin(self,key):
        # postgres needs no explicit XA-style begin
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        # % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    # postgres://user:password@host:port/db?sslmode=...
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the postgres:// URI and build the connection factory.

        srid -- spatial reference id used for geometry columns (PostGIS).
        """
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # libpq-style connection string (DSN)
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose driver according to uri
        self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        if self.driver_name == "pg8000":
            # NOTE(review): lexicographic string comparison of versions;
            # works for 9.x but is fragile -- confirm before relying on it
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json: self.types["json"] = "JSON"
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text fields are CAST to CHAR before matching
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            # NOTE(review): falls back to case-sensitive LIKE for CAST'ed
            # non-text fields -- confirm this is intended
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    def CONTAINS(self,first,second,case_sensitive=False):
        # list: fields store items as |a|b|c|, hence the '|' delimiters
        if first.type in ('string','text', 'json'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,key)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geo values as PostGIS FromText literals; defer otherwise."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            # fieldtype looks like 'geometry(schema,srid)'
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2764
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter that stores list: types as native arrays."""
    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # native arrays already come back as Python lists
        return value

    def parse_list_references(self, value, field_type):
        # wrap each element as a Reference ('list:' prefix stripped)
        element_type = field_type[5:]
        return [self.parse_reference(item, element_type) for item in value]

    def parse_list_strings(self, value, field_type):
        # native arrays already come back as Python lists
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as ARRAY[...] literals; defer otherwise."""
        if fieldtype.startswith('list:'):
            if not obj:
                items = []
            elif isinstance(obj, (list, tuple)):
                items = list(obj)
            else:
                items = [obj]
            cast = str if fieldtype.startswith('list:string') else int
            return 'ARRAY[%s]' % ','.join(repr(cast(item)) for item in items)
        return BaseAdapter.represent(self, obj, fieldtype)
2817
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    # PostgreSQL over zxJDBC (Jython environments).
    drivers = ('zxJDBC',)

    # postgres://user:password@host:port/db  (no sslmode over JDBC)
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        """Parse the postgres:// URI and build a JDBC connection factory."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # (jdbc url, user, password) tuple passed to driver.connect
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
2864
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Oracle has no autoincrement column, so each table gets a sequence plus a
    BEFORE INSERT trigger (see create_sequence_and_triggers); long CLOB
    literals are rewritten into bind variables by execute().
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    # web2py field type -> Oracle column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        """Name of the per-table sequence backing the id column."""
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        """Name of the per-table BEFORE INSERT trigger."""
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        # Oracle requires DEFAULT to precede NOT NULL
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        """DROP the table and the sequence that backs its id column."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with nested ROWNUM filtering (pre-12c Oracle)."""
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        """Shorten generated constraint names to Oracle's 30-char identifier cap."""
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literals (CLOB binds, to_date()); None -> no override."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # ':CLOB(...)' is later turned into a bind variable by execute()
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect using everything after 'oracle://' as the cx_Oracle dsn."""
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        if not 'threaded' in driver_args:
            # allow the connection to be shared across threads by default
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Pin the session date formats so date strings round-trip."""
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # matches the next :CLOB('...') literal that is outside quoted strings
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Run *command*, turning inline :CLOB('...') literals into binds.

        Oracle cannot accept long CLOB literals inside SQL text, so each one
        is replaced with a positional bind (:1, :2, ...) and its unescaped
        payload appended to *args*; a trailing semicolon is stripped.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # [6:-2] strips the CLOB(' prefix and ') suffix; '' unescapes to '
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus the sequence/trigger pair faking autoincrement.

        The trigger also resynchronizes the sequence when a row is inserted
        with an explicit id.
        """
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
        CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
        DECLARE
            curr_val NUMBER;
            diff_val NUMBER;
            PRAGMA autonomous_transaction;
        BEGIN
            IF :NEW.id IS NOT NULL THEN
                EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                diff_val := :NEW.id - curr_val - 1;
                IF diff_val != 0 THEN
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                  EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                END IF;
            END IF;
            SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL;
        END;
        """ % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name))

    def lastrowid(self,table):
        """Current value of the table's sequence == the id just inserted."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        """Materialize all rows, reading CLOB columns before they expire."""
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3038
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server (pyodbc), including GIS helpers."""

    drivers = ('pyodbc',)

    # web2py field type -> T-SQL column DDL
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }
def concat_add(self,tablename):
    """SQL glue used when ALTERing a table to add columns."""
    return '; ALTER TABLE %s ADD ' % tablename

def varquote(self,name):
    """Quote an identifier MS SQL style: [name]."""
    return varquote_aux(name,'[%s]')

def EXTRACT(self,field,what):
    """Date-part extraction via T-SQL DATEPART."""
    return "DATEPART(%s,%s)" % (what, self.expand(field))

def LEFT_JOIN(self):
    # MS SQL requires the full OUTER keyword
    return 'LEFT OUTER JOIN'

def RANDOM(self):
    # NEWID() yields a per-row value, usable for random ordering
    return 'NEWID()'

def ALLOW_NULL(self):
    return ' NULL'

def SUBSTRING(self,field,parameters):
    return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

def PRIMARY_KEY(self,key):
    return 'PRIMARY KEY CLUSTERED (%s)' % key

def AGGREGATE(self, first, what):
    """Aggregate call; MS SQL names LENGTH as LEN."""
    if what == 'LENGTH':
        what = 'LEN'
    return "%s(%s)" % (what, self.expand(first))
3099 3100
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Emit a SELECT honouring *limitby* via MS SQL's TOP clause.

    Only the upper bound is expressible with TOP; the lower bound is applied
    client-side by rowslice(). When the query is grouped, the trailing
    ORDER BY is stripped since it is not valid in this position.

    BUGFIX: when sql_o contained 'GROUP BY' but no 'ORDER BY ', the original
    sql_o[:sql_o.find('ORDER BY ')] used find()'s -1 and silently chopped
    the last character of the GROUP BY clause; now the slice only happens
    when an ORDER BY is actually present.
    """
    if limitby:
        (lmin, lmax) = limitby
        sql_s += ' TOP %i' % lmax
        if 'GROUP BY' in sql_o:
            orderby_start = sql_o.find('ORDER BY ')
            if orderby_start >= 0:
                sql_o = sql_o[:orderby_start]
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3108 3109 TRUE = 1 3110 FALSE = 0 3111 3112 REGEX_DSN = re.compile('^(?P<dsn>.+)$') 3113 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') 3114 REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 3115
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, srid=4326,
             after_connection=None):
    """Parse an mssql:// URI (DSN form or credentials form) and connect.

    srid: default spatial reference id used by the GIS helpers.
    Raises SyntaxError on malformed URIs.
    """
    self.db = db
    self.dbengine = "mssql"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args,uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.srid = srid
    self.find_or_make_work_folder()
    # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
    ruri = uri.split('://',1)[1]
    if '@' not in ruri:
        # no credentials present: treat the remainder as an ODBC DSN
        try:
            m = self.REGEX_DSN.match(ruri)
            if not m:
                raise SyntaxError(
                    'Parsing uri string(%s) has no result' % self.uri)
            dsn = m.group('dsn')
            if not dsn:
                raise SyntaxError('DSN required')
        except SyntaxError:
            e = sys.exc_info()[1]
            LOGGER.error('NdGpatch error')
            raise e
        # was cnxn = 'DSN=%s' % dsn
        cnxn = dsn
    else:
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '1433'
        # Parse the optional url name-value arg pairs after the '?'
        # (in the form of arg1=value1&arg2=value2&...)
        # Default values (drivers like FreeTDS insist on uppercase parameter keys)
        argsdict = { 'DRIVER':'{SQL Server}' }
        urlargs = m.group('urlargs') or ''
        for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
            argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
        urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
        cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
            % (host, port, db, user, password, urlargs)
    def connector(cnxn=cnxn,driver_args=driver_args):
        # default args freeze the final connection string for reconnect()
        return self.driver.connect(cnxn,**driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
3179
def lastrowid(self,table):
    """Id of the row just inserted; SCOPE_IDENTITY() ignores trigger inserts."""
    #self.execute('SELECT @@IDENTITY;')
    self.execute('SELECT SCOPE_IDENTITY();')
    return int(self.cursor.fetchone()[0])

def integrity_error_class(self):
    """Driver exception class raised on constraint violations."""
    return pyodbc.IntegrityError
3187
def rowslice(self,rows,minimum=0,maximum=None):
    """Client-side window of the result set (TOP cannot express an offset)."""
    return rows[minimum:] if maximum is None else rows[minimum:maximum]
3192
def EPOCH(self, first):
    """Seconds since the Unix epoch via DATEDIFF."""
    return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

# GIS Spatial Extensions

# No STAsGeoJSON in MSSQL

def ST_ASTEXT(self, first):
    """WKT text of a spatial value."""
    return '%s.STAsText()' %(self.expand(first))

def ST_CONTAINS(self, first, second):
    # MS SQL spatial predicates return a bit, hence the =1 comparison
    return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

def ST_DISTANCE(self, first, second):
    return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

def ST_EQUALS(self, first, second):
    return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

def ST_INTERSECTS(self, first, second):
    return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

def ST_OVERLAPS(self, first, second):
    return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

# no STSimplify in MSSQL

def ST_TOUCHES(self, first, second):
    return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

def ST_WITHIN(self, first, second):
    return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
3225
def represent(self, obj, fieldtype):
    """Literal SQL representation of *obj* for MS SQL.

    geometry/geography values are wrapped in STGeomFromText(); anything else
    is delegated to BaseAdapter.represent.

    BUGFIX: the original unpacked fieldtype[:-1].split('(') into two names,
    which raised ValueError for a bare 'geometry'/'geography' fieldtype
    (no '(parms)' suffix) -- exactly what self.types declares for those
    columns. The geography branch also tested equality instead of a prefix
    match, so a parameterized 'geography(...)' type never reached it.
    """
    field_is_type = fieldtype.startswith
    if field_is_type('geometry'):
        srid = 0  # MS SQL default srid for geometry
        parms = fieldtype[:-1].split('(')[1] if '(' in fieldtype else ''
        if parms:
            srid = parms
        return "geometry::STGeomFromText('%s',%s)" % (obj, srid)
    elif field_is_type('geography'):
        srid = 4326  # MS SQL default srid for geography
        parms = fieldtype[:-1].split('(')[1] if '(' in fieldtype else ''
        if parms:
            srid = parms
        return "geography::STGeomFromText('%s',%s)" % (obj, srid)
    return BaseAdapter.represent(self, obj, fieldtype)
3244
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Paginate: plain TOP for the first page, otherwise wrap the query
        in a ROW_NUMBER() window and filter on the requested row range."""
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # BETWEEN is inclusive and ROW_NUMBER() starts at 1
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # alias every projected column so the outer SELECT can name them
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            # NOTE(review): sql_o_inner is computed but never used -- confirm
            # whether the window's ORDER BY was meant to use it instead of sql_f.
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)

    def rowslice(self,rows,minimum=0,maximum=None):
        # slicing already happened in SQL; return the rows untouched
        return rows
3266
class MSSQL2Adapter(MSSQLAdapter):
    """MS SQL adapter that maps text fields to national (Unicode) types."""

    drivers = ('pyodbc',)

    # same as MSSQLAdapter.types but with NVARCHAR/NTEXT for character data
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Prefix character literals with N so they are sent as NVARCHAR."""
        value = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string','text', 'json') and value[:1]=="'":
            value = 'N'+value
        return value

    def execute(self,a):
        """Decode the SQL to unicode before handing it to the driver."""
        return self.log_execute(a.decode('utf8'))
3306
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase ASE, reusing the MS SQL dialect helpers."""

    drivers = ('Sybase',)

    # web2py field type -> Sybase column DDL
    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse a sybase:// URI (DSN form or credentials form) and connect.

        Raises SyntaxError on malformed URIs.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials present: treat the remainder as a DSN
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # BUGFIX: was self.REGEX_URI.match(uri), which still contained
            # the 'sybase://' scheme prefix and therefore mis-parsed the
            # credentials; match the stripped ruri as every other adapter does.
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

        def connector(dsn=dsn,driver_args=driver_args):
            # default args freeze the final dsn/credentials for reconnect()
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def integrity_error_class(self):
        """Driver exception class for constraint violations."""
        return RuntimeError # FIX THIS
3401
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird/Interbase-family servers.

    Autoincrement ids are emulated with a generator plus a BEFORE INSERT
    trigger (see create_sequence_and_triggers).
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    # web2py field type -> Firebird column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        """Name of the generator backing the table's id column."""
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        """Name of the BEFORE INSERT trigger filling the id column."""
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        """Seconds since the Unix epoch via DATEDIFF."""
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        # Firebird requires DEFAULT to precede NOT NULL
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def CONTAINING(self,first,second):
        """Case-insensitive substring match (Firebird CONTAINING operator)."""
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def CONTAINS(self, first, second, case_sensitive=False):
        # NOTE(review): 'key' is computed but never used, and case_sensitive
        # is ignored -- CONTAINING is always case-insensitive. Confirm whether
        # the escaped key was meant to be passed on instead of raw 'second'.
        if first.type in ('string','text'):
            key = str(second).replace('%','%%')
        elif first.type.startswith('list:'):
            key = '|'+str(second).replace('|','||').replace('%','%%')+'|'
        return self.CONTAINING(first,second)

    def _drop(self,table,mode):
        """DROP the table and the generator backing its id column."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """limitby maps onto Firebird's FIRST <count> SKIP <offset> prefix."""
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        """DELETE all rows and rewind the id generator."""
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # user[:password]@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a firebird:// URI and (optionally) connect.

        Raises SyntaxError when the URI lacks user, host or database name.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus the generator/trigger pair faking autoincrement."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        """Current generator value == the id just inserted (gen_id step 0)."""
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])
3532
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded: the URI carries a filesystem path instead of a host."""

    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # user[:password]@/path/to/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-Firebird URI and (optionally) connect.

        Raises SyntaxError when the URI lacks user or database path.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # empty host selects the embedded engine
        host = ''
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3579
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix (9+) via informixdb."""

    drivers = ('informixdb',)

    # web2py field type -> Informix column DDL
    # NOTE(review): 'BLOB SUB_TYPE n' is Firebird DDL syntax -- verify that
    # these text/blob mappings are actually accepted by Informix.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        # Informix requires DEFAULT to precede NOT NULL
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """limitby maps onto SKIP/FIRST, gated on the server version."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Informix-specific to_date() literals; None -> no override."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # user[:password]@host[:port]/db
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse an informix:// URI and (optionally) connect.

        Raises SyntaxError when the URI lacks user, host or database name.
        """
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        user = credential_decoder(user)
        password = credential_decoder(password)
        # informixdb dsn format: database@server
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        """Strip the trailing semicolon Informix rejects, then run."""
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        """SERIAL value of the last insert, exposed via sqlerrd[1]."""
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        """Driver exception class raised on constraint violations."""
        return informixdb.IntegrityError
3696
3697 -class InformixSEAdapter(InformixAdapter):
3698 """ work in progress """ 3699
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Build a plain SELECT; *limitby* is deliberately ignored because
    Informix SE cannot page in SQL (see rowslice)."""
    parts = (sql_s, sql_f, sql_t, sql_w, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % parts
3703
def rowslice(self,rows,minimum=0,maximum=None):
    """Apply limitby client-side by slicing the fetched rows."""
    return rows[minimum:maximum] if maximum is not None else rows[minimum:]
3708
class DB2Adapter(BaseAdapter):
    """Adapter for IBM DB2, connected through pyodbc."""

    # DB-API driver candidates, probed in order by find_driver()
    drivers = ('pyodbc',)

    # web2py field type -> DB2 column DDL fragment; %(...)s placeholders
    # are substituted when table DDL is generated
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }
def LEFT_JOIN(self):
    # DB2 requires the OUTER keyword for left joins
    return 'LEFT OUTER JOIN'

def RANDOM(self):
    # DB2's random-number SQL function, used for ORDER BY RAND()
    return 'RAND()'
3744
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Build a SELECT; DB2 caps rows with FETCH FIRST n ROWS ONLY.

    Note only the upper bound is used here - the offset is applied
    client-side by rowslice().
    """
    if limitby:
        _, upper = limitby
        sql_o = '%s FETCH FIRST %i ROWS ONLY' % (sql_o, upper)
    parts = (sql_s, sql_f, sql_t, sql_w, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % parts
3750
def represent_exceptions(self, obj, fieldtype):
    """DB2-specific literal encodings; None means 'no exception, use the
    generic represent() path'."""
    if fieldtype == 'blob':
        return "BLOB('%s')" % base64.b64encode(str(obj))
    if fieldtype == 'datetime':
        # DB2 timestamps use '-' and '.' separators instead of 'T'/':'
        if isinstance(obj, datetime.datetime):
            stamp = obj.isoformat()[:19].replace('T','-').replace(':','.')
        elif isinstance(obj, datetime.date):
            stamp = obj.isoformat()[:10] + '-00.00.00'
        else:
            stamp = obj
        return "'%s'" % stamp
    return None
3762
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """DB2 via pyodbc: the URI tail after 'db2://' is handed to the
    driver verbatim as an ODBC connection string."""
    self.db = db
    self.dbengine = "db2"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    odbc_string = uri.split('://', 1)[1]
    def connector(cnxn=odbc_string, driver_args=driver_args):
        return self.driver.connect(cnxn, **driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
3780
def execute(self,command):
    """Run *command*; DB2 via ODBC will not accept a trailing ';'."""
    if command.endswith(';'):
        command = command[:-1]
    return self.log_execute(command)
3785
def lastrowid(self,table):
    # DB2 exposes the last generated identity via IDENTITY_VAL_LOCAL();
    # queried per-table right after the INSERT on the same connection.
    self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
    return int(self.cursor.fetchone()[0])
3789
def rowslice(self,rows,minimum=0,maximum=None):
    """Apply the limitby offset client-side (DB2 has no OFFSET here)."""
    # slice(minimum, None) is identical to rows[minimum:]
    return rows[slice(minimum, maximum)]
3794
class TeradataAdapter(BaseAdapter):
    """Adapter for Teradata, connected through pyodbc."""

    # DB-API driver candidates, probed in order by find_driver()
    drivers = ('pyodbc',)

    # web2py field type -> Teradata column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Teradata via pyodbc: everything after 'teradata://' is passed to
    the driver verbatim as an ODBC connection string."""
    self.db = db
    self.dbengine = "teradata"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    odbc_string = uri.split('://', 1)[1]
    def connector(cnxn=odbc_string, driver_args=driver_args):
        return self.driver.connect(cnxn, **driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
3845
def LEFT_JOIN(self):
    # Teradata requires the OUTER keyword for left joins
    return 'LEFT OUTER JOIN'
# Similar to MSSQL, Teradata can't specify a range (for Pageby)
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Cap rows with TOP n; the lower bound cannot be expressed in SQL."""
    if limitby:
        _, upper = limitby
        sql_s = '%s TOP %i' % (sql_s, upper)
    parts = (sql_s, sql_f, sql_t, sql_w, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % parts
3855
3856 - def _truncate(self, table, mode=''):
3857 tablename = table._tablename 3858 return ['DELETE FROM %s ALL;' % (tablename)]
# Placeholder sequence name substituted into 'id' DDL; deliberately an
# invalid object name so it cannot collide with user objects.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """Adapter for Ingres, connected through ingresdbi."""

    drivers = ('ingresdbi',)

    # web2py field type -> Ingres column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        # INGRES_SEQNAME is replaced with a real per-table sequence in
        # create_sequence_and_triggers()
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
def LEFT_JOIN(self):
    # Ingres requires the OUTER keyword for left joins
    return 'LEFT OUTER JOIN'

def RANDOM(self):
    # Ingres random-number SQL function (used for ORDER BY RANDOM())
    return 'RANDOM()'
3899
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Page using FIRST n (a select modifier) plus OFFSET m."""
    if limitby:
        offset, upper = limitby
        span = upper - offset
        if span:
            sql_s += ' FIRST %d ' % (span,)
        if offset:
            # OFFSET requires Ingres 9.2+
            sql_o += ' OFFSET %d' % (offset,)
    parts = (sql_s, sql_f, sql_t, sql_w, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % parts
3910
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Ingres constructor; only a local database name is supported.

    FIX: the connection string was read from ``self._uri``, which is
    never assigned anywhere (only ``self.uri`` is set), so every
    connection attempt raised AttributeError.  The ``uri`` argument is
    now used directly.
    """
    self.db = db
    self.dbengine = "ingres"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    connstr = uri.split(':', 1)[1]
    # Simple URI processing: strip whitespace and leading '//'
    connstr = connstr.lstrip()
    while connstr.startswith('/'):
        connstr = connstr[1:]
    database_name = connstr  # Assume only (local) dbname is passed in
    vnode = '(local)'
    servertype = 'ingres'
    trace = (0, None)  # No tracing
    driver_args.update(database=database_name,
                       vnode=vnode,
                       servertype=servertype,
                       trace=trace)
    def connector(driver_args=driver_args):
        return self.driver.connect(**driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
3940
def create_sequence_and_triggers(self, query, table, **args):
    """Run the CREATE TABLE *query*, wiring up id autoincrement.

    # post create table auto inc code (if needed)
    # modify table to btree for performance....
    # Older Ingres releases could use rule/trigger like Oracle above.
    """
    if hasattr(table,'_primarykey'):
        # keyed table: no sequence needed, just index the primary key
        modify_tbl_sql = 'modify %s to btree unique on %s' % \
            (table._tablename,
             ', '.join(["'%s'" % x for x in table.primarykey]))
        self.execute(modify_tbl_sql)
    else:
        # swap the INGRES_SEQNAME placeholder for a real per-table
        # sequence, create it, then create the table and index 'id'
        tmp_seqname='%s_iisq' % table._tablename
        query=query.replace(INGRES_SEQNAME, tmp_seqname)
        self.execute('create sequence %s' % tmp_seqname)
        self.execute(query)
        self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))


def lastrowid(self,table):
    # read back the current value of the per-table sequence created in
    # create_sequence_and_triggers()
    tmp_seqname='%s_iisq' % table
    self.execute('select current value for %s' % tmp_seqname)
    return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

def integrity_error_class(self):
    # NOTE(review): relies on the module-level `ingresdbi` name rather
    # than self.driver; assumes the ingresdbi module was importable.
    return ingresdbi.IntegrityError
3965
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter using NVARCHAR/NCLOB column types for unicode data;
    all behaviour is inherited from IngresAdapter."""

    drivers = ('ingresdbi',)

    # same mapping as IngresAdapter.types but with national (N*) types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
class SAPDBAdapter(BaseAdapter):
    """Adapter for SAP DB / MaxDB (experimental)."""

    # DB-API driver candidates, probed in order by find_driver()
    drivers = ('sapdb',)

    support_distributed_transaction = False
    # web2py field type -> SAP DB column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # the id default is wired to a sequence in
        # create_sequence_and_triggers()
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
def sequence_name(self,table):
    """Name of the sequence that feeds *table*'s id column."""
    return '%s_id_Seq' % (table,)
4029
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Emulate limitby via a nested ROWNO subquery (work in progress).

    NOTE(review): the limited branch's format string begins with
    '%s %s FROM' (sql_s, sql_f) without a leading SELECT keyword, unlike
    the unlimited branch - this looks broken and should be verified
    against a live SAP DB before relying on paging.
    """
    if limitby:
        (lmin, lmax) = limitby
        if len(sql_w) > 1:
            # existing WHERE clause: append the row-number lower bound
            sql_w_row = sql_w + ' AND w_row > %i' % lmin
        else:
            sql_w_row = 'WHERE w_row > %i' % lmin
        return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

def create_sequence_and_triggers(self, query, table, **args):
    """Create the id sequence, point the id column's default at it, then
    run the CREATE TABLE *query*."""
    # following lines should only be executed if table._sequence_name does not exist
    self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
    self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        % (table._tablename, table._id.name, table._sequence_name))
    self.execute(query)
4046 4047 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 4048 4049
4050 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4051 credential_decoder=IDENTITY, driver_args={}, 4052 adapter_args={}, do_connect=True, after_connection=None):
4053 self.db = db 4054 self.dbengine = "sapdb" 4055 self.uri = uri 4056 if do_connect: self.find_driver(adapter_args,uri) 4057 self.pool_size = pool_size 4058 self.folder = folder 4059 self.db_codec = db_codec 4060 self._after_connection = after_connection 4061 self.find_or_make_work_folder() 4062 ruri = uri.split('://',1)[1] 4063 m = self.REGEX_URI.match(ruri) 4064 if not m: 4065 raise SyntaxError("Invalid URI string in DAL") 4066 user = credential_decoder(m.group('user')) 4067 if not user: 4068 raise SyntaxError('User required') 4069 password = credential_decoder(m.group('password')) 4070 if not password: 4071 password = '' 4072 host = m.group('host') 4073 if not host: 4074 raise SyntaxError('Host name required') 4075 db = m.group('db') 4076 if not db: 4077 raise SyntaxError('Database name required') 4078 def connector(user=user, password=password, database=db, 4079 host=host, driver_args=driver_args): 4080 return self.driver.Connection(user, password, database, 4081 host, **driver_args)
4082 self.connector = connector 4083 if do_connect: self.reconnect()
4084
def lastrowid(self,table):
    # read the sequence's next value; NOTE(review): NEXTVAL advances the
    # sequence rather than reading the value just consumed by the INSERT -
    # verify this is the intended behaviour on SAP DB.
    self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
    return int(self.cursor.fetchone()[0])
4088
class CubridAdapter(MySQLAdapter):
    """Adapter for CUBRID (experimental); reuses the MySQL SQL dialect."""

    # DB-API driver candidates, probed in order by find_driver()
    drivers = ('cubriddb',)

    # URI shape: user:password@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """CUBRID constructor: parses 'cubrid://user:password@host[:port]/db'.

    FIX: user and password were previously passed through
    ``credential_decoder`` twice, and the re-decoded ``passwd`` local was
    never used (the connector bound ``password`` instead); credentials
    are now decoded exactly once.
    """
    self.db = db
    self.dbengine = "cubrid"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    ruri = uri.split('://', 1)[1]
    m = self.REGEX_URI.match(ruri)
    if not m:
        raise SyntaxError(
            "Invalid URI string in DAL: %s" % self.uri)
    user = credential_decoder(m.group('user'))
    if not user:
        raise SyntaxError('User required')
    password = credential_decoder(m.group('password'))
    if not password:
        password = ''
    host = m.group('host')
    if not host:
        raise SyntaxError('Host name required')
    db = m.group('db')
    if not db:
        raise SyntaxError('Database name required')
    port = int(m.group('port') or '30000')
    # parsed from the ?set_encoding= URI option; NOTE(review): currently
    # not forwarded to the driver - confirm whether cubriddb accepts it.
    charset = m.group('charset') or 'utf8'
    def connector(host=host, port=port, db=db,
                  user=user, passwd=password, driver_args=driver_args):
        return self.driver.connect(host, port, db, user, passwd, **driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
4132
def after_connection(self):
    """Run once per (re)connect: enforce FK checks and standard quoting."""
    for statement in ('SET FOREIGN_KEY_CHECKS=1;',
                      "SET sql_mode='NO_BACKSLASH_ESCAPES';"):
        self.execute(statement)
4136
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object whose content lives in a `web2py_filesystem`
    table instead of on disk (used on Google SQL, where the filesystem
    is not writable)."""

    # class-level flag: True once the backing table has been created
    web2py_filesystem = False
def escape(self,obj):
    # delegate SQL escaping to the adapter of the connected database
    return self.db._adapter.escape(obj)
4146
def __init__(self,db,filename,mode):
    """Open *filename* stored in *db*'s web2py_filesystem table.

    mode 'r'/'rw'/'a' loads existing content (falling back to the real
    filesystem); any other mode starts with an empty buffer.
    """
    # only engines with a known CREATE TABLE syntax below are supported
    if not db._adapter.dbengine in ('mysql', 'postgres'):
        raise RuntimeError("only MySQL/Postgres can store metadata .table files in database for now")
    self.db = db
    self.filename = filename
    self.mode = mode
    # create the backing table at most once per process (class-level flag)
    if not self.web2py_filesystem:
        if db._adapter.dbengine == 'mysql':
            sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
        elif db._adapter.dbengine == 'postgres':
            sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
        self.db.executesql(sql)
        DatabaseStoredFile.web2py_filesystem = True
    self.p=0          # read cursor into self.data
    self.data = ''    # in-memory content buffer
    if mode in ('r','rw','a'):
        # NOTE(review): filename is interpolated into SQL unescaped;
        # assumed to be a trusted internal path, never user input.
        query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
            % filename
        rows = self.db.executesql(query)
        if rows:
            self.data = rows[0][0]
        elif exists(filename):
            # fall back to an on-disk copy if the DB has none
            datafile = open(filename, 'r')
            try:
                self.data = datafile.read()
            finally:
                datafile.close()
        elif mode in ('r','rw'):
            raise RuntimeError("File %s does not exist" % filename)
4176
def read(self, bytes):
    """Return up to *bytes* characters from the current position and
    advance the read cursor."""
    chunk = self.data[self.p:self.p + bytes]
    self.p += len(chunk)
    return chunk
4181
def readline(self):
    """Return the next line (newline included), advancing the cursor;
    the final line may lack a trailing newline."""
    newline_at = self.data.find('\n', self.p) + 1
    if newline_at > 0:
        line = self.data[self.p:newline_at]
        self.p = newline_at
    else:
        line = self.data[self.p:]
        self.p = len(self.data)
    return line
4189
def write(self,data):
    # append-only buffer; flushed to the database by close_connection()
    self.data += data

def close_connection(self):
    """Persist the buffer into web2py_filesystem and commit.

    Idempotent: self.db is set to None afterwards so a second call is a
    no-op.
    """
    if self.db is not None:
        self.db.executesql(
            "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
        # only the content is quote-escaped; the path is assumed trusted
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
            % (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()
        self.db = None

def close(self):
    self.close_connection()

@staticmethod
def exists(db, filename):
    """True if *filename* exists on disk or in the database table.

    NOTE: this staticmethod shadows the module-level `exists`
    (os.path.exists) inside the class namespace; the unqualified call
    below still resolves to the module-level one.
    """
    if exists(filename):
        return True
    query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
    if db.executesql(query):
        return True
    return False
4214
class UseDatabaseStoredFile:
    """Mixin overriding BaseAdapter's file helpers so that .table
    migration metadata is kept in the database (DatabaseStoredFile)
    instead of on the local filesystem."""

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)

    def file_open(self, filename, mode='rb', lock=True):
        # lock is accepted for interface compatibility; no locking is
        # needed for database-backed storage
        return DatabaseStoredFile(self.db,filename,mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self,filename):
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()
4231
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Google Cloud SQL adapter: MySQL dialect plus database-stored
    metadata files (the GAE filesystem is read-only)."""

    uploads_in_blob = True

    # URI shape: instance/database
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')
def __init__(self, db, uri='google:sql://realm:domain/database',
             pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Parse 'google:sql://instance/database' and prepare an rdbms
    connector.

    adapter_args['createdb'] (default True) controls whether
    after_connection() creates the database on first use.
    """
    self.db = db
    self.dbengine = "mysql"
    self.uri = uri
    self.pool_size = pool_size
    self.db_codec = db_codec
    self._after_connection = after_connection
    # derive a pseudo work folder from the current application's path
    # (THREAD_LOCAL.folder is set by the framework per request)
    self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
            os.sep+'applications'+os.sep,1)[1])
    ruri = uri.split("://")[1]
    m = self.REGEX_URI.match(ruri)
    if not m:
        raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
    instance = credential_decoder(m.group('instance'))
    self.dbstring = db = credential_decoder(m.group('db'))
    driver_args['instance'] = instance
    if not 'charset' in driver_args:
        driver_args['charset'] = 'utf8'
    self.createdb = createdb = adapter_args.get('createdb',True)
    if not createdb:
        driver_args['database'] = db
    def connector(driver_args=driver_args):
        # rdbms is Google's Cloud SQL DB-API module
        return rdbms.connect(**driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
4266
def after_connection(self):
    """Run per (re)connect: optionally create/select the database, then
    set session modes (mirrors the MySQL adapter)."""
    statements = []
    if self.createdb:
        # dropping first was considered and left disabled:
        # self.execute('DROP DATABASE %s' % self.dbstring)
        statements.append('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
        statements.append('USE %s' % self.dbstring)
    statements.append("SET FOREIGN_KEY_CHECKS=1;")
    statements.append("SET sql_mode='NO_BACKSLASH_ESCAPES';")
    for sql in statements:
        self.execute(sql)
4274
def execute(self, command, *a, **b):
    # Python 2: commands arrive as byte strings; the rdbms driver wants
    # unicode, so decode as UTF-8 before logging/executing.
    return self.log_execute(command.decode('utf8'), *a, **b)
4277
class NoSQLAdapter(BaseAdapter):
    """Base class for non-relational backends (GAE datastore, CouchDB,
    MongoDB, ...): no SQL generation, no transactions, no joins."""

    # SELECT ... FOR UPDATE is meaningless without row locks
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        """Coerce *obj* to unicode (Python 2: str is decoded as UTF-8)."""
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj
4288
def id_query(self, table):
    # query matching every row of *table* (ids are always positive)
    return table._id > 0

def represent(self, obj, fieldtype):
    """Coerce a python value *obj* into the storage form required by
    *fieldtype* on a NoSQL backend.

    Returns the coerced value (possibly a list for list: types) or None
    for empty non-text values.  Written for Python 2: relies on `long`
    and on `map()` returning a list.
    """
    field_is_type = fieldtype.startswith
    # callables (e.g. default=lambda: ...) are evaluated first
    if isinstance(obj, CALLABLETYPES):
        obj = obj()
    if isinstance(fieldtype, SQLCustomType):
        return fieldtype.encoder(obj)
    if isinstance(obj, (Expression, Field)):
        raise SyntaxError("non supported on GAE")
    if self.dbengine == 'google:datastore':
        # native GAE property values pass through untouched
        if isinstance(fieldtype, gae.Property):
            return obj
    is_string = isinstance(fieldtype,str)
    is_list = is_string and field_is_type('list:')
    if is_list:
        # normalize list: fields to an actual list
        if not obj:
            obj = []
        if not isinstance(obj, (list, tuple)):
            obj = [obj]
    # empty string means "no value" except for textual field types
    # ('st'ring, 'te'xt, 'pa'ssword, 'up'load)
    if obj == '' and not \
            (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
        return None
    if not obj is None:
        if isinstance(obj, list) and not is_list:
            # a list of scalars for a scalar field: coerce each element
            obj = [self.represent(o, fieldtype) for o in obj]
        elif fieldtype in ('integer','bigint','id'):
            obj = long(obj)
        elif fieldtype == 'double':
            obj = float(obj)
        elif is_string and field_is_type('reference'):
            # references store the referenced record's integer id
            if isinstance(obj, (Row, Reference)):
                obj = obj['id']
            obj = long(obj)
        elif fieldtype == 'boolean':
            # '0...'/'F...' (and falsy values) map to False
            if obj and not str(obj)[0].upper() in '0F':
                obj = True
            else:
                obj = False
        elif fieldtype == 'date':
            if not isinstance(obj, datetime.date):
                (y, m, d) = map(int,str(obj).strip().split('-'))
                obj = datetime.date(y, m, d)
            elif isinstance(obj,datetime.datetime):
                # demote datetime to plain date
                (y, m, d) = (obj.year, obj.month, obj.day)
                obj = datetime.date(y, m, d)
        elif fieldtype == 'time':
            if not isinstance(obj, datetime.time):
                time_items = map(int,str(obj).strip().split(':')[:3])
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    # missing seconds default to 0
                    (h, mi, s) = time_items + [0]
                obj = datetime.time(h, mi, s)
        elif fieldtype == 'datetime':
            if not isinstance(obj, datetime.datetime):
                # assumes 'YYYY-MM-DD[ HH:MM:SS]' layout
                (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                while len(time_items)<3:
                    time_items.append(0)
                (h, mi, s) = time_items
                obj = datetime.datetime(y, m, d, h, mi, s)
        elif fieldtype == 'blob':
            pass
        elif fieldtype == 'json':
            obj = self.to_unicode(obj)
            if have_serializers:
                obj = serializers.loads_json(obj)
            elif simplejson:
                obj = simplejson.loads(obj)
            else:
                raise RuntimeError("missing simplejson")
        elif is_string and field_is_type('list:string'):
            return map(self.to_unicode,obj)
        elif is_list:
            return map(int,obj)
        else:
            obj = self.to_unicode(obj)
    return obj
4369
# The _xxx methods normally return SQL strings; on NoSQL backends they
# return human-readable descriptions used only for db._lastsql/logging.

def _insert(self,table,fields):
    return 'insert %s in %s' % (fields, table)

def _count(self,query,distinct=None):
    # distinct is accepted for interface compatibility and ignored
    return 'count %s' % repr(query)

def _select(self,query,fields,attributes):
    return 'select %s where %s' % (repr(fields), repr(query))

def _delete(self,tablename, query):
    return 'delete %s where %s' % (repr(tablename),repr(query))

def _update(self,tablename,query,fields):
    return 'update %s (%s) where %s' % (repr(tablename),
                                        repr(fields),repr(query))

def commit(self):
    """
    remember: no transactions on many NoSQL
    """
    pass

def rollback(self):
    """
    remember: no transactions on many NoSQL
    """
    pass

def close_connection(self):
    """
    remember: no transactions on many NoSQL
    """
    pass
# these functions should never be called!
# SQL operator/DDL serialization has no meaning on NoSQL backends, so
# every method of BaseAdapter's SQL-building interface is stubbed out to
# fail loudly if something reaches it.
def OR(self,first,second): raise SyntaxError("Not supported")
def AND(self,first,second): raise SyntaxError("Not supported")
def AS(self,first,second): raise SyntaxError("Not supported")
def ON(self,first,second): raise SyntaxError("Not supported")
def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ADD(self,first,second): raise SyntaxError("Not supported")
def SUB(self,first,second): raise SyntaxError("Not supported")
def MUL(self,first,second): raise SyntaxError("Not supported")
def DIV(self,first,second): raise SyntaxError("Not supported")
def LOWER(self,first): raise SyntaxError("Not supported")
def UPPER(self,first): raise SyntaxError("Not supported")
def EXTRACT(self,first,what): raise SyntaxError("Not supported")
def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
def LEFT_JOIN(self): raise SyntaxError("Not supported")
def RANDOM(self): raise SyntaxError("Not supported")
def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
def ILIKE(self,first,second): raise SyntaxError("Not supported")
def drop(self,table,mode): raise SyntaxError("Not supported")
def alias(self,table,alias): raise SyntaxError("Not supported")
def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
def prepare(self,key): raise SyntaxError("Not supported")
def commit_prepared(self,key): raise SyntaxError("Not supported")
def rollback_prepared(self,key): raise SyntaxError("Not supported")
def concat_add(self,table): raise SyntaxError("Not supported")
def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
# deliberately a silent no-op: subclasses create their own schema objects
def create_sequence_and_triggers(self, query, table, **args): pass
def log_execute(self,*a,**b): raise SyntaxError("Not supported")
def execute(self,*a,**b): raise SyntaxError("Not supported")
def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
def lastrowid(self,table): raise SyntaxError("Not supported")
def integrity_error_class(self): raise SyntaxError("Not supported")
def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4441
class GAEF(object):
    """One GAE datastore filter: field name, operator, raw value, and a
    python callable implementing the same comparison client-side."""

    def __init__(self, name, op, value, apply):
        # the user-visible 'id' field is stored under the datastore key
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (
            self.name, self.op, repr(self.value), type(self.value))
4451
class GoogleDatastoreAdapter(NoSQLAdapter):
    """Adapter for the Google App Engine datastore (google:datastore)."""

    uploads_in_blob = True
    # populated in __init__ with gae.Property factories
    types = {}

    # .table migration metadata is meaningless on the datastore: no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # optional datastore namespace, e.g. 'google:datastore://mynamespace'
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Datastore adapter: maps web2py types to gae.Property factories.

    Most constructor arguments are accepted for interface compatibility
    only; there is no connection pool and no credentials on GAE.
    NOTE(review): self.types.update(...) mutates the CLASS-level dict
    shared by all instances - idempotent, but worth confirming it is
    intentional.
    """
    self.types.update({
            'boolean': gae.BooleanProperty,
            'string': (lambda: gae.StringProperty(multiline=True)),
            'text': gae.TextProperty,
            'json': gae.TextProperty,
            'password': gae.StringProperty,
            'blob': gae.BlobProperty,
            'upload': gae.StringProperty,
            'integer': gae.IntegerProperty,
            'bigint': gae.IntegerProperty,
            'float': gae.FloatProperty,
            'double': gae.FloatProperty,
            'decimal': GAEDecimalProperty,
            'date': gae.DateProperty,
            'time': gae.TimeProperty,
            'datetime': gae.DateTimeProperty,
            # 'id' has no property: the datastore key plays that role
            'id': None,
            'reference': gae.IntegerProperty,
            'list:string': (lambda: gae.StringListProperty(default=None)),
            'list:integer': (lambda: gae.ListProperty(int,default=None)),
            'list:reference': (lambda: gae.ListProperty(int,default=None)),
            })
    self.db = db
    self.uri = uri
    self.dbengine = 'google:datastore'
    self.folder = folder
    db['_lastsql'] = ''
    self.db_codec = 'UTF-8'
    self._after_connection = after_connection
    self.pool_size = 0
    match = self.REGEX_NAMESPACE.match(uri)
    if match:
        # switch all subsequent datastore calls to the given namespace
        namespace_manager.set_namespace(match.group('namespace'))
4498
def parse_id(self, value, field_type):
    # datastore keys/ids are already the right type: no conversion
    return value
4501
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
    """Build the gae.Model (or PolyModel) subclass backing *table*.

    migrate/fake_migrate are accepted for interface compatibility and
    ignored: the datastore is schemaless.  polymodel may be None, True,
    or a parent Table for polymodel inheritance.

    FIX: the original tested the misspelled attribute
    ``field.custom_qaulifier`` but then read ``field.custom_qualifier``,
    so the check and the value could never refer to the same attribute
    (and a missing attribute raised AttributeError).  Both spellings are
    now accepted, preferring the correct one.
    """
    myfields = {}
    for field in table:
        # skip fields inherited from the polymodel parent table
        if isinstance(polymodel,Table) and field.name in polymodel.fields():
            continue
        # custom properties to add to the GAE field declaration
        custom = getattr(field, 'custom_qualifier',
                         getattr(field, 'custom_qaulifier', None))
        attr = custom if isinstance(custom, dict) else {}
        field_type = field.type
        if isinstance(field_type, SQLCustomType):
            ftype = self.types[field_type.native or field_type.type](**attr)
        elif isinstance(field_type, gae.Property):
            # a ready-made GAE property was supplied directly
            ftype = field_type
        elif field_type.startswith('id'):
            # the datastore key is the id; no explicit property
            continue
        elif field_type.startswith('decimal'):
            precision, scale = field_type[7:].strip('()').split(',')
            precision = int(precision)
            scale = int(scale)
            ftype = GAEDecimalProperty(precision, scale, **attr)
        elif field_type.startswith('reference'):
            if field.notnull:
                attr = dict(required=True)
            referenced = field_type[10:].strip()
            ftype = self.types[field_type[:9]](referenced, **attr)
        elif field_type.startswith('list:reference'):
            if field.notnull:
                attr['required'] = True
            # list:reference stores plain integer ids; the referenced
            # table name embedded in field_type is not needed here
            ftype = self.types[field_type[:14]](**attr)
        elif field_type.startswith('list:'):
            ftype = self.types[field_type](**attr)
        elif not field_type in self.types\
                or not self.types[field_type]:
            raise SyntaxError('Field: unknown field type: %s' % field_type)
        else:
            ftype = self.types[field_type](**attr)
        myfields[field.name] = ftype
    if not polymodel:
        table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
    elif polymodel==True:
        table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
    elif isinstance(polymodel,Table):
        table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
    else:
        raise SyntaxError("polymodel must be None, True, a table or a tablename")
    return None
4550
def expand(self, expression, field_type=None):
    """Translate a DAL Field/Expression/Query/constant into the form
    the GAE query layer understands (a name, a GAEF list, or a string).
    """
    if isinstance(expression, Field):
        # GAE cannot index text/blob/json columns
        if expression.type in ('text', 'blob', 'json'):
            raise SyntaxError('AppEngine does not index by: %s' % expression.type)
        return expression.name
    if isinstance(expression, (Expression, Query)):
        # dispatch on how many operands the operator carries
        if expression.second is not None:
            return expression.op(expression.first, expression.second)
        if expression.first is not None:
            return expression.op(expression.first)
        return expression.op()
    if field_type:
        return self.represent(expression, field_type)
    if isinstance(expression, (list, tuple)):
        return ','.join(self.represent(entry, field_type)
                        for entry in expression)
    return str(expression)
### TODO from gql.py Expression
def AND(self, first, second):
    """Concatenate two GAEF filter lists; __key__ filters are moved to
    the front as the datastore requires."""
    left = self.expand(first)
    right = self.expand(second)
    if right[0].name == '__key__' and left[0].name != '__key__':
        return right + left
    return left + right
4577
def EQ(self, first, second=None):
    """Equality filter; datastore Key values are passed through
    without conversion."""
    if isinstance(second, Key):
        return [GAEF(first.name, '=', second, lambda a, b: a == b)]
    value = self.represent(second, first.type)
    return [GAEF(first.name, '=', value, lambda a, b: a == b)]
4582
def NE(self, first, second=None):
    """Inequality filter; id comparisons are done against a Key."""
    if first.type == 'id':
        if second is not None:
            second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name, '!=', second, lambda a, b: a != b)]
    value = self.represent(second, first.type)
    return [GAEF(first.name, '!=', value, lambda a, b: a != b)]
4590
def LT(self, first, second=None):
    """Less-than filter; id comparisons are done against a Key."""
    if first.type == 'id':
        key = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name, '<', key, lambda a, b: a < b)]
    value = self.represent(second, first.type)
    return [GAEF(first.name, '<', value, lambda a, b: a < b)]
4597
def LE(self, first, second=None):
    """Less-or-equal filter; id comparisons are done against a Key."""
    if first.type == 'id':
        key = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name, '<=', key, lambda a, b: a <= b)]
    value = self.represent(second, first.type)
    return [GAEF(first.name, '<=', value, lambda a, b: a <= b)]
4604
def GT(self, first, second=None):
    """Greater-than filter; id>0 (or '0') is the "any record" idiom and
    is not converted to a Key."""
    if first.type != 'id' or second in (0, '0'):
        value = self.represent(second, first.type)
        return [GAEF(first.name, '>', value, lambda a, b: a > b)]
    key = Key.from_path(first._tablename, long(second))
    return [GAEF(first.name, '>', key, lambda a, b: a > b)]
4611
def GE(self, first, second=None):
    """Greater-or-equal filter; id comparisons are done against a Key."""
    if first.type == 'id':
        key = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name, '>=', key, lambda a, b: a >= b)]
    value = self.represent(second, first.type)
    return [GAEF(first.name, '>=', value, lambda a, b: a >= b)]
4618
def INVERT(self, first):
    """Descending-order marker for orderby: '-' + field name."""
    return '-' + first.name
4621
def COMMA(self, first, second):
    """Join two expanded expressions with ', ' (used for orderby lists)."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
4624
def BELONGS(self, first, second=None):
    """Membership ('in') filter; only list/tuple right-hand sides are
    supported, and id members are converted to Keys."""
    if not isinstance(second, (list, tuple)):
        raise SyntaxError("Not supported")
    if first.type == 'id':
        keys = [Key.from_path(first._tablename, int(i)) for i in second]
        return [GAEF(first.name, 'in', keys, lambda a, b: a in b)]
    values = self.represent(second, first.type)
    return [GAEF(first.name, 'in', values, lambda a, b: a in b)]
4633
def CONTAINS(self, first, second, case_sensitive=False):
    """Containment filter for list: fields.

    NOTE: `case_sensitive` is silently ignored — GAE can only do case
    sensitive matches.
    """
    if not first.type.startswith('list:'):
        raise SyntaxError("Not supported")
    member = self.expand(second, first.type[5:])
    return [GAEF(first.name, '=', member, lambda a, b: b in a)]
4639
def NOT(self, first):
    """Negate a simple comparison Query by swapping its operator.

    Only direct comparisons (EQ/NE/LT/LE/GT/GE) can be negated on the
    datastore; anything else raises SyntaxError.

    Fix: corrected the "Not suported" typo in both error messages, for
    consistency with the other methods' "Not supported".
    """
    nops = {self.EQ: self.NE,
            self.NE: self.EQ,
            self.LT: self.GE,
            self.GT: self.LE,
            self.LE: self.GT,
            self.GE: self.LT}
    if not isinstance(first, Query):
        raise SyntaxError("Not supported")
    nop = nops.get(first.op, None)
    if not nop:
        raise SyntaxError("Not supported %s" % first.op.__name__)
    # mutate the query in place, then expand the negated form
    first.op = nop
    return self.expand(first)
4654
def truncate(self, table, mode):
    # The datastore has no truncate; delete every record of the table
    # through the DAL instead.  `mode` is accepted for interface
    # compatibility and ignored.
    self.db(table._id).delete()
4657
def select_raw(self, query, fields=None, attributes=None):
    """Run `query` against the datastore.

    Returns (items, tablename, fieldnames) where `items` is either a
    list of model instances (key lookups) or a gae.Query object; the
    callers (select/count/delete/update) post-process it.
    """
    db = self.db
    fields = fields or []
    attributes = attributes or {}
    args_get = attributes.get
    new_fields = []
    for item in fields:
        if isinstance(item, SQLALL):
            new_fields += item._table
        else:
            new_fields.append(item)
    fields = new_fields
    if query:
        tablename = self.get_table(query)
    elif fields:
        tablename = fields[0].tablename
        query = db._adapter.id_query(fields[0].table)
    else:
        raise SyntaxError("Unable to determine a tablename")

    if query:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename])

    # tableobj is a GAE Model class (or subclass)
    tableobj = db[tablename]._tableobj
    filters = self.expand(query)

    projection = None
    if len(db[tablename].fields) == len(fields):
        # getting all fields, not a projection query
        projection = None
    elif args_get('projection') == True:
        projection = []
        for f in fields:
            if f.type in ['text', 'blob', 'json']:
                raise SyntaxError(
                    "text and blob field types not allowed in projection queries")
            else:
                projection.append(f.name)

    # projections can't include 'id';
    # it will be added to the result later
    query_projection = [
        p for p in projection if \
            p != db[tablename]._id.name] if projection \
            else None

    cursor = None
    if isinstance(args_get('reusecursor'), str):
        cursor = args_get('reusecursor')
    items = gae.Query(tableobj, projection=query_projection,
                      cursor=cursor)

    for filter in filters:
        if args_get('projection') == True and \
           filter.name in query_projection and \
           filter.op in ['=', '<=', '>=']:
            raise SyntaxError(
                "projection fields cannot have equality filters")
        if filter.name == '__key__' and filter.op == '>' and filter.value == 0:
            # id > 0 matches everything: skip the filter entirely
            continue
        elif filter.name == '__key__' and filter.op == '=':
            if filter.value == 0:
                items = []
            elif isinstance(filter.value, Key):
                # key queries return a class instance,
                # can't use projection
                # extra values will be ignored in post-processing later
                item = tableobj.get(filter.value)
                items = (item and [item]) or []
            else:
                # key queries return a class instance,
                # can't use projection
                # extra values will be ignored in post-processing later
                item = tableobj.get_by_id(filter.value)
                items = (item and [item]) or []
        elif isinstance(items, list):  # i.e. there is a single record!
            # FIX: test each candidate `i` — the old code read the
            # attribute off the loop-invariant `item` variable, so the
            # filter compared the wrong record
            items = [i for i in items if filter.apply(
                     getattr(i, filter.name), filter.value)]
        else:
            if filter.name == '__key__' and filter.op != 'in':
                items.order('__key__')
            items = items.filter('%s %s' % (filter.name, filter.op),
                                 filter.value)
    if not isinstance(items, list):
        if args_get('left', None):
            raise SyntaxError('Set: no left join in appengine')
        if args_get('groupby', None):
            raise SyntaxError('Set: no groupby in appengine')
        orderby = args_get('orderby', False)
        if orderby:
            ### THIS REALLY NEEDS IMPROVEMENT !!!
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if isinstance(orderby, Expression):
                orderby = self.expand(orderby)
            orders = orderby.split(', ')
            for order in orders:
                order = {'-id': '-__key__', 'id': '__key__'}.get(order, order)
                items = items.order(order)
        if args_get('limitby', None):
            (lmin, lmax) = attributes['limitby']
            (limit, offset) = (lmax - lmin, lmin)
            rows = items.fetch(limit, offset=offset)
            # cursor is only useful if there was a limit and we didn't
            # return all results
            if args_get('reusecursor'):
                db['_lastcursor'] = items.cursor()
            items = rows
    return (items, tablename, projection or db[tablename].fields)
4769
def select(self, query, fields, attributes):
    """
    This is the GAE version of select. Some notes to consider:
     - db['_lastsql'] is not set because there is no SQL statement string
       for a GAE query
     - 'nativeRef' is a magical fieldname used for self references on GAE
     - optional attribute 'projection' when set to True will trigger
       use of the GAE projection queries. note that there are rules for
       what is accepted imposed by GAE: each field must be indexed,
       projection queries cannot contain blob or text fields, and you
       cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
     - optional attribute 'reusecursor' allows use of cursor with queries
       that have the limitby attribute. Set the attribute to True for the
       first query, set it to the value of db['_lastcursor'] to continue
       a previous query. The user must save the cursor value between
       requests, and the filters must be identical. It is up to the user
       to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
    """

    (items, tablename, fields) = self.select_raw(query,fields,attributes)
    # self.db['_lastsql'] = self._select(query,fields,attributes)
    # for the id column and self references return the model instance
    # itself; other columns are read off the instance by attribute name
    rows = [[(t==self.db[tablename]._id.name and item) or \
             (t=='nativeRef' and item) or getattr(item, t) \
             for t in fields] for item in items]
    colnames = ['%s.%s' % (tablename, t) for t in fields]
    processor = attributes.get('processor',self.parse)
    return processor(rows,fields,colnames,False)
4797
def count(self, query, distinct=None, limit=None):
    # Count the records matching `query`; COUNT DISTINCT cannot be
    # expressed on the datastore.
    if distinct:
        raise RuntimeError("COUNT DISTINCT not supported")
    (items, tablename, fields) = self.select_raw(query)
    # self.db['_lastsql'] = self._count(query)
    try:
        # items is a plain list when the query resolved to key lookups
        return len(items)
    except TypeError:
        # otherwise it is a gae.Query: let the datastore count, capped
        # at `limit` when given
        return items.count(limit=limit)
4807
def delete(self, tablename, query):
    """
    This function was changed on 2010-05-04 because according to
    http://code.google.com/p/googleappengine/issues/detail?id=3119
    GAE no longer supports deleting more than 1000 records.
    """
    # self.db['_lastsql'] = self._delete(tablename,query)
    (items, tablename, fields) = self.select_raw(query)
    # items can be one item or a query
    if not isinstance(items,list):
        # use a keys_only query to ensure that this runs as a datastore
        # small operation, deleting in batches of at most 1000 keys
        leftitems = items.fetch(1000, keys_only=True)
        counter = 0
        while len(leftitems):
            counter += len(leftitems)
            gae.delete(leftitems)
            leftitems = items.fetch(1000, keys_only=True)
    else:
        # single-record (key lookup) case: delete the instances directly
        counter = len(items)
        gae.delete(items)
    return counter
4830
def update(self, tablename, query, update_fields):
    """Apply `update_fields` to every entity matching `query`, one
    put() at a time; returns the number of entities updated."""
    # self.db['_lastsql'] = self._update(tablename,query,update_fields)
    (items, tablename, fields) = self.select_raw(query)
    counter = 0
    for entity in items:
        for field, value in update_fields:
            setattr(entity, field.name, self.represent(value, field.type))
        entity.put()
        counter += 1
    LOGGER.info(str(counter))
    return counter
4842
def insert(self, table, fields):
    # Store one new entity; returns a Reference wrapping the numeric id
    # (with the raw gae Key kept on rid._gaekey).
    dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
    # table._db['_lastsql'] = self._insert(table,fields)
    tmp = table._tableobj(**dfields)
    tmp.put()
    rid = Reference(tmp.key().id())
    (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
    return rid
4851
def bulk_insert(self, table, items):
    """Represent every row and store them all with a single gae.put()."""
    parsed_items = [
        table._tableobj(**dict((f.name, self.represent(v, f.type))
                               for f, v in item))
        for item in items]
    gae.put(parsed_items)
    return True
4859
def uuid2int(uuidv):
    """Map a UUID string to its 128-bit integer representation."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
4862
def int2uuid(n):
    """Inverse of uuid2int: canonical UUID string for the integer n."""
    value = uuid.UUID(int=n)
    return str(value)
4865
class CouchDBAdapter(NoSQLAdapter):
    # Adapter for CouchDB: each DAL table maps to a CouchDB database,
    # each record to a JSON document, and selects are compiled into
    # javascript map functions passed to ctable.query().
    drivers = ('couchdb',)

    # uploaded files are stored inside the document, not on disk
    uploads_in_blob = True
    # python-side converters for each DAL field type
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # migration-file helpers are no-ops: couchdb needs no .table files
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self,expression,field_type=None):
        # the DAL 'id' field lives in the couchdb document key '_id'
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    def AND(self,first,second):
        # javascript conjunction inside the generated map function
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        # javascript disjunction
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        # javascript string/array concatenation
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        # Serialize a python value into its javascript/JSON literal form
        # for embedding in the map function.
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Record the connection parameters and connect to the couchdb
        # server; pooling is left to the couchdb driver (pool_size kept
        # only for interface compatibility).
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme and talk plain http to the server
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # Create the backing database when migrations are enabled;
        # errors (e.g. it already exists) are deliberately ignored.
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        # ids are random uuids stored (as decimal strings) in '_id'
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        # Build the javascript map function and the output column names
        # (does not touch the server).
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # 'id' is stored under the couchdb key '_id'
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        # run the temporary view and keep only the emitted values
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def delete(self,tablename,query):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: delete one document addressed by its id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select the matching ids, delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: update one document addressed by its id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select matching ids, rewrite each document
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        # implemented by selecting the ids and counting them client-side
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
def cleanup(text):
    """Validate that `text` contains only [0-9a-zA-Z_] characters.

    Returns the text unchanged; raises SyntaxError otherwise.
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
5065
class MongoDBAdapter(NoSQLAdapter):
    # rows come back as json-compatible python values, no SQL parsing
    native_json = True
    drivers = ('pymongo',)

    # uploaded files are stored inside the document, not on disk
    uploads_in_blob = True

    # python-side converters for each DAL field type
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
             pool_size=0, folder=None, db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    # Parse the mongodb uri and connect; pooling is handled by the
    # pymongo driver itself (pool_size kept for interface parity).

    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    import random
    from bson.objectid import ObjectId
    from bson.son import SON
    import pymongo.uri_parser

    m = pymongo.uri_parser.parse_uri(uri)

    # keep driver helpers on the instance so other methods can use them
    self.SON = SON
    self.ObjectId = ObjectId
    self.random = random

    self.dbengine = 'mongodb'
    self.folder = folder
    db['_lastsql'] = ''
    self.db_codec = 'UTF-8'
    self._after_connection = after_connection
    self.pool_size = pool_size
    # this is the minimum amount of replicates that it should wait
    # for on insert/update
    self.minimumreplication = adapter_args.get('minimumreplication',0)
    # inserts/selects used to be performed asynchronously, but the
    # default is now synchronous, except when overruled by either this
    # adapter argument or a per-call function parameter
    self.safe = adapter_args.get('safe',True)

    if isinstance(m,tuple):
        m = {"database" : m[1]}
    if m.get('database')==None:
        raise SyntaxError("Database is required!")
    def connector(uri=self.uri,m=m):
        try:
            # Connection() is deprecated in newer pymongo releases
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]
        except self.driver.errors.ConnectionFailure:
            inst = sys.exc_info()[1]
            raise SyntaxError("The connection to " +
                              uri + " could not be made")

    self.reconnect(connector,cursor=False)
5149
def object_id(self, arg=None):
    """ Convert input to a valid Mongodb ObjectId instance

    self.object_id("<random>") -> ObjectId (not unique) instance """
    # Accepts: None/0, an existing ObjectId (returned unchanged), an
    # int/long, a decimal or hex string, or the literal "<random>".
    if not arg:
        arg = 0
    if isinstance(arg, basestring):
        # we assume an integer as default input
        rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
        if arg.isdigit() and (not rawhex):
            arg = int(arg)
        elif arg == "<random>":
            # build a random (not guaranteed unique) 24-digit hex value
            arg = int("0x%sL" % \
                "".join([self.random.choice("0123456789abcdef") \
                for x in range(24)]), 0)
        elif arg.isalnum():
            # treat any other alphanumeric string as base-16
            if not arg.startswith("0x"):
                arg = "0x%s" % arg
            try:
                arg = int(arg, 0)
            except ValueError, e:
                raise ValueError(
                        "invalid objectid argument string: %s" % e)
        else:
            raise ValueError("Invalid objectid argument string. " +
                             "Requires an integer or base 16 value")
    elif isinstance(arg, self.ObjectId):
        return arg
    if not isinstance(arg, (int, long)):
        raise TypeError("object_id argument must be of type " +
                        "ObjectId or an objectid representable integer")
    if arg == 0:
        hexvalue = "".zfill(24)
    else:
        # strip the '0x' prefix and any long 'L' suffix for ObjectId
        hexvalue = hex(arg)[2:].replace("L", "")
    return self.ObjectId(hexvalue)
5186
def represent(self, obj, fieldtype):
    # Convert a python value into something pymongo can store natively.
    value = NoSQLAdapter.represent(self, obj, fieldtype)
    if fieldtype =='date':
        if value == None:
            return value
        # this piece of data can be stripped off based on the fieldtype
        t = datetime.time(0, 0, 0)
        # mongodb doesn't have a date object and so it must be a
        # datetime, string or integer
        return datetime.datetime.combine(value, t)
    elif fieldtype == 'time':
        if value == None:
            return value
        # this piece of data can be stripped off based on the fieldtype
        d = datetime.date(2000, 1, 1)
        # mongodb doesn't have a time object and so it must be a
        # datetime, string or integer
        return datetime.datetime.combine(d, value)
    elif fieldtype == 'list:string' or \
         fieldtype == 'list:integer' or \
         fieldtype == 'list:reference':
        # lists are stored natively by mongodb
        return value
    return value
# `safe` selects synchronous (safe=True, the default) versus
# asynchronous writes; see self.safe set in __init__.
def insert(self, table, fields, safe=None):
    """Insert one document; returns the new id as the integer value of
    the ObjectId hex string."""
    if safe is None:
        safe = self.safe
    ctable = self.connection[table._tablename]
    values = dict()
    for k, v in fields:
        if k.name in ["id", "safe"]:
            continue
        fieldname = k.name
        fieldtype = table[k.name].type
        if ("reference" in fieldtype) or (fieldtype == "id"):
            values[fieldname] = self.object_id(v)
        else:
            values[fieldname] = self.represent(v, fieldtype)
    # pymongo adds the generated '_id' to `values` during insert
    ctable.insert(values, safe=safe)
    return int(str(values['_id']), 16)
5229
def create_table(self, table, migrate=True, fake_migrate=False,
                 polymodel=None, isCapped=False):
    # mongodb is schemaless: collections appear on first insert, so
    # there is nothing to create; capped collections are not implemented
    if isCapped:
        raise RuntimeError("Not implemented")
def count(self, query, distinct=None, snapshot=True):
    # Delegated to select(..., count=True) which returns {'count': n}.
    if distinct:
        raise RuntimeError("COUNT DISTINCT not supported")
    if not isinstance(query,Query):
        raise SyntaxError("Not Supported")
    tablename = self.get_table(query)
    return int(self.select(query,[self.db[tablename]._id], {},
                           count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefore call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
def expand(self, expression, field_type=None):
    # Translate a DAL Query/Expression/Field/constant into the mongodb
    # query-document fragment pymongo expects.
    if isinstance(expression, Query):
        # any query using 'id':=
        # set name as _id (as per pymongo/mongodb primary key)
        # convert second arg to an objectid field
        # (if its not already)
        # if second arg is 0 convert to objectid
        # NOTE(review): the `result` computed in this branch is
        # overwritten by the (Expression, Query) branch below; the
        # lasting effect here is the in-place rename to '_id' and the
        # object_id() conversion of expression.second — confirm before
        # restructuring
        if isinstance(expression.first,Field) and \
                ((expression.first.type == 'id') or \
                ("reference" in expression.first.type)):
            if expression.first.type == 'id':
                expression.first.name = '_id'
            # cast to Mongo ObjectId
            expression.second = self.object_id(expression.second)
            result = expression.op(expression.first, expression.second)
    if isinstance(expression, Field):
        if expression.type=='id':
            result = "_id"
        else:
            result = expression.name

    elif isinstance(expression, (Expression, Query)):
        if not expression.second is None:
            result = expression.op(expression.first, expression.second)
        elif not expression.first is None:
            result = expression.op(expression.first)
        elif not isinstance(expression.op, str):
            result = expression.op()
        else:
            result = expression.op
    elif field_type:
        # constants are serialized according to the field type
        result = str(self.represent(expression,field_type))
    elif isinstance(expression,(list,tuple)):
        result = ','.join(self.represent(item,field_type) for
                          item in expression)
    else:
        result = expression
    return result
5286
def _select(self, query, fields, attributes):
    # Prepare everything a pymongo find() needs; returns
    # (tablename, query dict, field projection SON, sort list,
    #  limit, skip) without touching the server.
    if 'for_update' in attributes:
        logging.warn('mongodb does not support for_update')
    for key in set(attributes.keys())-set(('limitby',
                                           'orderby','for_update')):
        if attributes[key]!=None:
            logging.warn('select attribute not implemented: %s' % key)

    new_fields=[]
    mongosort_list = []

    # try an orderby attribute
    orderby = attributes.get('orderby', False)
    limitby = attributes.get('limitby', False)
    # distinct = attributes.get('distinct', False)
    if orderby:
        if isinstance(orderby, (list, tuple)):
            orderby = xorify(orderby)

        # !!!! need to add 'random'
        # a leading '-' on an expanded field means descending order
        for f in self.expand(orderby).split(','):
            if f.startswith('-'):
                mongosort_list.append((f[1:], -1))
            else:
                mongosort_list.append((f, 1))

    if limitby:
        limitby_skip, limitby_limit = limitby
    else:
        limitby_skip = limitby_limit = 0

    # SON preserves field order in the projection document
    mongofields_dict = self.SON()
    mongoqry_dict = {}
    for item in fields:
        if isinstance(item, SQLALL):
            new_fields += item._table
        else:
            new_fields.append(item)
    fields = new_fields
    if isinstance(query,Query):
        tablename = self.get_table(query)
    elif len(fields) != 0:
        tablename = fields[0].tablename
    else:
        raise SyntaxError("The table name could not be found in " +
                          "the query nor from the select statement.")

    mongoqry_dict = self.expand(query)
    fields = fields or self.db[tablename]
    for field in fields:
        mongofields_dict[field.name] = 1

    return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
        limitby_limit, limitby_skip
5341 5342
def select(self, query, fields, attributes, count=False,
           snapshot=False):
    # TODO: support joins
    tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
    limitby_limit, limitby_skip = self._select(query, fields, attributes)
    ctable = self.connection[tablename]

    if count:
        # only the number of matching documents is wanted
        return {'count' : ctable.find(
                mongoqry_dict, mongofields_dict,
                skip=limitby_skip, limit=limitby_limit,
                sort=mongosort_list, snapshot=snapshot).count()}
    else:
        # pymongo cursor object
        mongo_list_dicts = ctable.find(mongoqry_dict,
                            mongofields_dict, skip=limitby_skip,
                            limit=limitby_limit, sort=mongosort_list,
                            snapshot=snapshot)
        rows = []
        # populate row in proper order
        # Here we replace ._id with .id to follow the standard naming
        colnames = []
        newnames = []
        for field in fields:
            colname = str(field)
            colnames.append(colname)
            tablename, fieldname = colname.split(".")
            if fieldname == "_id":
                # Mongodb reserved uuid key
                # NOTE(review): this renames the Field object in place
                field.name = "id"
            newnames.append(".".join((tablename, field.name)))

        for record in mongo_list_dicts:
            row=[]
            for colname in colnames:
                tablename, fieldname = colname.split(".")
                # switch to Mongo _id uuids for retrieving
                # record id's
                if fieldname == "id": fieldname = "_id"
                if fieldname in record:
                    if isinstance(record[fieldname],
                                  self.ObjectId):
                        # ObjectIds become ints (hex value) for the DAL
                        value = int(str(record[fieldname]), 16)
                    else:
                        value = record[fieldname]
                else:
                    value = None
                row.append(value)
            rows.append(row)
        processor = attributes.get('processor', self.parse)
        result = processor(rows, fields, newnames, False)
        return result
5395 5396
def INVERT(self, first):
    #print "in invert first=%s" % first
    # descending-order marker for orderby: '-' + expanded field
    return '-%s' % self.expand(first)
def drop(self, table, mode=''):
    # Drop the whole collection backing this table; `mode` is accepted
    # for interface compatibility and ignored.
    ctable = self.connection[table._tablename]
    ctable.drop()
def truncate(self, table, mode, safe=None):
    """Remove every document from the table's collection.

    Fix: honour the resolved `safe` flag — the old code computed it
    from self.safe and then passed the literal True to remove(),
    ignoring both the argument and the adapter default.
    """
    if safe is None:
        safe = self.safe
    ctable = self.connection[table._tablename]
    # a None filter matches (and removes) all documents
    ctable.remove(None, safe=safe)
def oupdate(self, tablename, query, fields):
    """Build the (modify, filter) pair for a pymongo update():
    modify is a '$set' document, filter the expanded query."""
    if not isinstance(query, Query):
        raise SyntaxError("Not Supported")
    filter = None
    if query:
        filter = self.expand(query)
    changes = dict((k.name, self.represent(v, k.type))
                   for k, v in fields)
    modify = {'$set': changes}
    return modify, filter
def update(self, tablename, query, fields, safe=None):
    # Update every document matching `query`; returns the number of
    # affected rows (pymongo's "n" when available, else the pre-count).
    if safe == None:
        safe = self.safe
    # return amount of adjusted rows or zero, but no exceptions
    # @ related not finding the result
    if not isinstance(query, Query):
        raise RuntimeError("Not implemented")
    # count before updating so we can still report a number when the
    # driver gives no write result
    amount = self.count(query, False)
    modify, filter = self.oupdate(tablename, query, fields)
    try:
        result = self.connection[tablename].update(filter,
                    modify, multi=True, safe=safe)
        if safe:
            try:
                # if result count is available fetch it
                return result["n"]
            except (KeyError, AttributeError, TypeError):
                return amount
        else:
            return amount
    except Exception, e:
        # TODO Reverse update query to verify that the query succeeded
        raise RuntimeError("uncaught exception when updating rows: %s" % e)
# this function returns a dict with the where clause and update fields
def _update(self,tablename,query,fields):
    # string form of the (modify, filter) pair, used for logging
    return str(self.oupdate(tablename, query, fields))
5449
def delete(self, tablename, query, safe=None):
    """Delete matching documents; returns how many rows matched
    before deletion.

    Fixes: validate the query type *before* counting (the old code
    called self.count() on a possibly invalid query first) and drop
    the dead `amount = 0` assignment that was immediately overwritten.
    """
    if safe is None:
        safe = self.safe
    if not isinstance(query, Query):
        raise RuntimeError("query type %s is not supported" % \
                           type(query))
    # count first so the return value reflects what was removed
    amount = self.count(query, False)
    filter = self.expand(query)
    self._delete(tablename, filter, safe=safe)
    return amount
def _delete(self, tablename, filter, safe=None):
    # thin wrapper over pymongo remove(); `filter` is an already
    # expanded query document
    return self.connection[tablename].remove(filter, safe=safe)
def bulk_insert(self, table, items):
    """Insert each item in turn; returns the list of new ids."""
    ids = []
    for item in items:
        ids.append(self.insert(table, item))
    return ids
# TODO This will probably not work:(
def NOT(self, first):
    """Negate an expanded filter with MongoDB's '$not' operator."""
    return {"$not": self.expand(first)}
5473
def AND(self,first,second):
    """Merge two expanded filters into one document (an implicit AND)."""
    merged = self.expand(first)
    merged.update(self.expand(second))
    return merged
5479
def OR(self,first,second):
    """Build a MongoDB '$or' filter from two expanded sub-filters."""
    # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
    return {'$or': [self.expand(first), self.expand(second)]}
5487
def BELONGS(self, first, second):
    """Translate membership into a MongoDB '$in' filter.

    An empty list/tuple yields {1: 0}, a filter matching nothing.
    # NOTE(review): a string `second` has its last character dropped
    # before matching -- preserved as-is, intent unclear; confirm.
    """
    if isinstance(second, str):
        return {self.expand(first): {"$in": [second[:-1]]}}
    if second == [] or second == ():
        return {1: 0}
    values = [self.expand(item, first.type) for item in second]
    return {self.expand(first): {"$in": values}}
5495
def EQ(self,first,second):
    """Translate an equality test into a MongoDB filter document."""
    return {self.expand(first): self.expand(second)}
5500
def NE(self, first, second=None):
    """Translate inequality into a MongoDB '$ne' filter document."""
    return {self.expand(first): {'$ne': self.expand(second)}}
5505
def LT(self,first,second=None):
    """Translate 'less than' into a '$lt' filter; None is not comparable."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return {self.expand(first): {'$lt': self.expand(second)}}
5512
def LE(self,first,second=None):
    """Translate 'less or equal' into a '$lte' filter; rejects None."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return {self.expand(first): {'$lte': self.expand(second)}}
5519
def GT(self,first,second=None):
    """Translate 'greater than' into a '$gt' filter.

    Fixed: comparing against None now raises RuntimeError, consistent
    with the sibling LT/LE/GE operators (previously a silent
    {'$gt': None} filter was emitted). `second` gains a default of
    None so the signature matches the other comparators
    (backward-compatible: callers always pass a value).
    """
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    return {self.expand(first): {'$gt': self.expand(second)}}
5524
def GE(self,first,second=None):
    """Translate 'greater or equal' into a '$gte' filter; rejects None."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return {self.expand(first): {'$gte': self.expand(second)}}
5531
def ADD(self, first, second):
    """Arithmetic addition -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally (would require
    server-side JavaScript).
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '%s + %s' % (expand(first), expand(second, first.type))
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5536
def SUB(self, first, second):
    """Arithmetic subtraction -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '(%s - %s)' % (expand(first), expand(second, first.type))
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5541
def MUL(self, first, second):
    """Arithmetic multiplication -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '(%s * %s)' % (expand(first), expand(second, first.type))
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5546
def DIV(self, first, second):
    """Arithmetic division -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '(%s / %s)' % (expand(first), expand(second, first.type))
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5551
def MOD(self, first, second):
    """Modulo -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '(%s %% %s)' % (expand(first), expand(second, first.type))
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5556
def AS(self, first, second):
    """Column aliasing -- unsupported by this NoSQL translator.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '%s AS %s' % (expand(first), second)
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
# We could implement an option that simulates a full featured SQL
# database. But I think the option should be set explicit or
# implemented as another library.
def ON(self, first, second):
    """JOIN ... ON -- impossible in this NoSQL backend.

    Raises NotImplementedError unconditionally.
    Fixed: removed unreachable code after the raise; the SQL form it
    rendered is kept here as a comment:
    '%s ON %s' % (expand(first), expand(second))
    """
    raise NotImplementedError("This is not possible in NoSQL" +
                              " but can be simulated with a wrapper.")
# BELOW ARE TWO IMPLEMENTATIONS OF SOME OF THE SAME FUNCTIONS
# (LIKE/STARTSWITH/ENDSWITH/CONTAINS) -- WHICH ONE IS BEST?
def COMMA(self, first, second):
    """Join two expanded expressions with a comma (SQL-style list)."""
    return '%s, %s' % (self.expand(first), self.expand(second))
5574
def LIKE(self, first, second):
    """Translate LIKE into a regex-literal string match.

    NOTE: shadowed by a later definition of LIKE in this class, so
    this version is never used at runtime.
    """
    #escaping regex operators?
    return {self.expand(first): ('%s' % \
        self.expand(second, 'string').replace('%','/'))}
5579
def STARTSWITH(self, first, second):
    """Translate STARTSWITH into a /^.../ regex-literal string.

    NOTE: shadowed by a later definition of STARTSWITH in this
    class, so this version is never used at runtime.
    """
    #escaping regex operators?
    return {self.expand(first): ('/^%s/' % \
        self.expand(second, 'string'))}
5584
def ENDSWITH(self, first, second):
    """Translate ENDSWITH into a /...$/ regex-literal string.

    Fixed: the pattern was '/%s^/' -- '^' anchors the START of a
    string, so placed at the end it could never match a suffix;
    '$' is the end-of-string anchor.
    NOTE: shadowed by a later ENDSWITH definition in this class.
    """
    #escaping regex operators?
    return {self.expand(first): ('/%s$/' % \
        self.expand(second, 'string'))}
5589
def CONTAINS(self, first, second, case_sensitive=False):
    """Translate CONTAINS into a /.../ regex-literal substring match.

    NOTE: shadowed by a later definition of CONTAINS in this class,
    so this version is never used at runtime.
    """
    # silently ignore, only case sensitive
    # There is a technical difference, but mongodb doesn't support
    # that, but the result will be the same
    return {self.expand(first) : ('/%s/' % \
        self.expand(second, 'string'))}
5596
def LIKE(self, first, second):
    """Translate LIKE into a '$regex' filter ('%' wildcard -> '.*')."""
    import re
    key = self.expand(first)
    pattern = re.escape(self.expand(second, 'string')).replace('%', '.*')
    return {key: {'$regex': pattern}}
#TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
    """Translate STARTSWITH into a start-anchored '$regex' filter."""
    #TODO Solve almost the same problem as with endswith
    import re
    key = self.expand(first)
    prefix = re.escape(self.expand(second, 'string'))
    return {key: {'$regex': '^' + prefix}}
#TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
    """Translate ENDSWITH into an end-anchored '$regex' filter."""
    #TODO if searched for a name like zsa_corbitt and the function
    # is endswith('a') then this is also returned.
    # Although it does end with a t
    import re
    key = self.expand(first)
    suffix = re.escape(self.expand(second, 'string'))
    return {key: {'$regex': suffix + '$'}}
#TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=False):
    """Translate CONTAINS into an unanchored '$regex' substring filter.

    `case_sensitive` is accepted for API compatibility but silently
    ignored (only case-sensitive matching is produced).

    Fixed: the operator key was ' $regex' (leading space), which
    MongoDB would not recognize as the $regex operator; also adds a
    local `import re` like the sibling LIKE/STARTSWITH/ENDSWITH
    operators instead of relying on the module-level import.
    """
    #TODO contains operators need to be transformed to Regex
    import re
    key = self.expand(first)
    return {key: {'$regex':
                  ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5629
class IMAPAdapter(NoSQLAdapter):
    """ IMAP server adapter

    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email read, search and
    other related IMAP mail services (as those implemented
    by brands like Google(r), and Yahoo!(r))
    can be managed from web2py applications.

    The code uses examples by Yuji Tomita on this post:
    http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based in docs for Python imaplib, python email
    and email IETF's (i.e. RFC2060 and RFC3501)

    This adapter was tested with a small set of operations with Gmail(r). Other
    services requests could raise command syntax and response data issues.

    It creates its table and field names "statically",
    meaning that the developer should leave the table and field
    definitions to the DAL instance by calling the adapter's
    .define_tables() method. The tables are defined with the
    IMAP server mailbox list information.

    .define_tables() returns a dictionary mapping dal tablenames
    to the server mailbox names with the following structure:

    {<tablename>: str <server mailbox name>}

    Here is a list of supported fields:

    Field       Type           Description
    ################################################################
    uid         string
    answered    boolean        Flag
    created     date
    content     list:string    A list of text or html parts
    to          string
    cc          string
    bcc         string
    size        integer        the amount of octets of the message*
    deleted     boolean        Flag
    draft       boolean        Flag
    flagged     boolean        Flag
    sender      string
    recent      boolean        Flag
    seen        boolean        Flag
    subject     string
    mime        string         The mime header declaration
    email       string         The complete RFC822 message**
    attachments <type list>    Each non text part as dict
    encoding    string         The main detected encoding

    *At the application side it is measured as the length of the RFC822
    message string

    WARNING: As row id's are mapped to email sequence numbers,
    make sure your imap client web2py app does not delete messages
    during select or update actions, to prevent
    updating or deleting different messages.
    Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, it is recommended the use
    of uid fields in query references (although the update and delete
    in separate actions rule still applies).

    # This is the code recommended to start imap support
    # at the app's model:

    imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
    imapdb.define_tables()

    Here is an (incomplete) list of possible imap commands:

    # Count today's unseen messages
    # smaller than 6000 octets from the
    # inbox mailbox

    q = imapdb.INBOX.seen == False
    q &= imapdb.INBOX.created == datetime.date.today()
    q &= imapdb.INBOX.size < 6000
    unread = imapdb(q).count()

    # Fetch last query messages
    rows = imapdb(q).select()

    # it is also possible to filter query select results with limitby and
    # sequences of mailbox fields

    set.select(<fields sequence>, limitby=(<int>, <int>))

    # Mark last query messages as seen
    messages = [row.uid for row in rows]
    seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

    # Delete messages in the imap database that have mails from mr. Gumby

    deleted = 0
    for mailbox in imapdb.tables
        deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()

    # It is also possible to mark messages for deletion instead of erasing them
    # directly with set.update(deleted=True)


    # This object gives access
    # to the adapter auto mailbox
    # mapped names (which native
    # mailbox has what table name)

    imapdb.mailboxes <dict> # tablename, server native name pairs

    # To retrieve a table native mailbox name use:
    imapdb.<table>.mailbox

    ### New features v2.4.1:

    # Declare mailboxes statically with tablename, name pairs
    # This avoids the extra server names retrieval

    imapdb.define_tables({"inbox": "INBOX"})

    # Selects without content/attachments/email columns will only
    # fetch header and flags

    imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
    """

    # candidate driver module names, resolved by the base adapter
    drivers = ('imaplib',)

    # python types used to represent the static field set defined
    # by define_tables() below
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'bigint': long,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'

    # parses "user[:password]@host[:port]" (scheme already stripped)
    REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
def __init__(self,
             db,
             uri,
             pool_size=0,
             folder=None,
             db_codec ='UTF-8',
             credential_decoder=IDENTITY,
             driver_args={},
             adapter_args={},
             do_connect=True,
             after_connection=None):
    """Create the IMAP adapter and (optionally) open the connection.

    db uri: imap://user@example.com:password@imap.server.com:123
    A port of 993 switches the connector to IMAP4_SSL.
    """
    # TODO: max size adapter argument for preventing large mail transfers

    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    self.pool_size=pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.credential_decoder = credential_decoder
    self.driver_args = driver_args
    self.adapter_args = adapter_args
    self.mailbox_size = None
    self.static_names = None
    self.charset = sys.getfilesystemencoding()
    # imap class (IMAP4 or IMAP4_SSL), chosen lazily in connector()
    self.imap4 = None
    uri = uri.split("://")[1]

    # MESSAGE is an identifier for sequence number

    self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                  '\\Recent', '\\Seen', '\\Answered']
    # maps dal field names to IMAP search keys / system flags;
    # None means the field cannot be searched server-side
    self.search_fields = {
        'id': 'MESSAGE', 'created': 'DATE',
        'uid': 'UID', 'sender': 'FROM',
        'to': 'TO', 'cc': 'CC',
        'bcc': 'BCC', 'content': 'TEXT',
        'size': 'SIZE', 'deleted': '\\Deleted',
        'draft': '\\Draft', 'flagged': '\\Flagged',
        'recent': '\\Recent', 'seen': '\\Seen',
        'subject': 'SUBJECT', 'answered': '\\Answered',
        'mime': None, 'email': None,
        'attachments': None
        }

    db['_lastsql'] = ''

    m = self.REGEX_URI.match(uri)
    user = m.group('user')
    password = m.group('password')
    host = m.group('host')
    # NOTE(review): the port group is optional in REGEX_URI, so a uri
    # without a port makes int(None) raise TypeError here -- confirm
    # whether a default port should be applied.
    port = int(m.group('port'))
    over_ssl = False
    if port==993:
        over_ssl = True

    driver_args.update(host=host,port=port, password=password, user=user)
    def connector(driver_args=driver_args):
        # successful authentication is always assumed
        # TODO: support direct connection and login tests
        if over_ssl:
            self.imap4 = self.driver.IMAP4_SSL
        else:
            self.imap4 = self.driver.IMAP4
        connection = self.imap4(driver_args["host"], driver_args["port"])
        data = connection.login(driver_args["user"], driver_args["password"])

        # static mailbox list
        connection.mailbox_names = None

        # dummy cursor function (connection pooling expects one)
        connection.cursor = lambda : True

        return connection

    self.db.define_tables = self.define_tables
    self.connector = connector
    if do_connect: self.reconnect()
5859
def reconnect(self, f=None, cursor=True):
    """
    IMAP4 Pool connection method

    imap connection lacks of self cursor command.
    A custom command should be provided as a replacement
    for connection pooling to prevent uncaught remote session
    closing

    """
    # already connected: nothing to do
    if getattr(self,'connection',None) != None:
        return
    if f is None:
        f = self.connector

    if not self.pool_size:
        # pooling disabled: open a fresh connection
        self.connection = f()
        self.cursor = cursor and self.connection.cursor()
    else:
        POOLS = ConnectionPool.POOLS
        uri = self.uri
        while True:
            GLOBAL_LOCKER.acquire()
            if not uri in POOLS:
                POOLS[uri] = []
            if POOLS[uri]:
                # reuse a pooled connection, then verify it is alive
                self.connection = POOLS[uri].pop()
                GLOBAL_LOCKER.release()
                self.cursor = cursor and self.connection.cursor()
                if self.cursor and self.check_active_connection:
                    try:
                        # check if connection is alive or close it
                        result, data = self.connection.list()
                    except:
                        # Possible connection reset error
                        # TODO: read exception class
                        self.connection = f()
                break
            else:
                # pool empty: release the lock and open a new connection
                GLOBAL_LOCKER.release()
                self.connection = f()
                self.cursor = cursor and self.connection.cursor()
                break
    self.after_connection_hook()
5904
def get_last_message(self, tablename):
    """Return the highest message sequence number of a mailbox, or None."""
    # request the mailbox list from the server if not cached yet
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    try:
        selected = self.connection.select(
            self.connection.mailbox_names[tablename])
        return int(selected[1][0])
    except (IndexError, ValueError, TypeError, KeyError):
        e = sys.exc_info()[1]
        LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
        return None
5918
def get_uid_bounds(self, tablename):
    """Return the (first, last) message uid pair of a mailbox, or None."""
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    # selecting the mailbox refreshes server-side state
    last_message = self.get_last_message(tablename)
    result, data = self.connection.uid("search", None, "(ALL)")
    uids = data[0].strip().split()
    if not uids:
        return None
    return (uids[0], uids[-1])
5931
def convert_date(self, date, add=None):
    """Convert a date object to a d-Mon-Y string for IMAP, or the
    inverse: parse an RFC822-style "Day, d Mon Y H:M:S" header value
    into a datetime.

    add <timedelta>: added to the date/parse result.
    Returns None for unparseable strings or unsupported types.

    Fixed: the docstring was buried after the first statements (making
    it a no-op string mid-function); the datetime branch is now checked
    first so string handling is only attempted for non-date input.
    """
    if add is None:
        add = datetime.timedelta()
    months = [None, "Jan","Feb","Mar","Apr","May","Jun",
              "Jul", "Aug","Sep","Oct","Nov","Dec"]
    if isinstance(date, (datetime.datetime, datetime.date)):
        return (date + add).strftime("%d-%b-%Y")
    elif isinstance(date, basestring):
        # Prevent unexpected date response format
        try:
            dayname, datestring = date.split(",")
        except (ValueError):
            LOGGER.debug("Could not parse date text: %s" % date)
            return None
        date_list = datestring.strip().split()
        year = int(date_list[2])
        month = months.index(date_list[1])
        day = int(date_list[0])
        hms = map(int, date_list[3].split(":"))
        return datetime.datetime(year, month, day,
                                 hms[0], hms[1], hms[2]) + add
    else:
        return None
5962 5963 @staticmethod
5964 - def header_represent(f, r):
5965 from email.header import decode_header 5966 text, encoding = decode_header(f)[0] 5967 return text
5968
def encode_text(self, text, charset, errors="replace"):
    """Normalize mail text to a utf-8 encoded string.

    None becomes the empty string; byte strings are decoded using
    `charset` (utf-8 when charset is None) before re-encoding.
    """
    if text is None:
        text = ""
    elif isinstance(text, str):
        # decode with the declared charset before re-encoding as utf-8
        text = unicode(text, "utf-8" if charset is None else charset, errors)
    else:
        raise Exception("Unsupported mail text type %s" % type(text))
    return text.encode("utf-8")
5982
def get_charset(self, message):
    """Return the declared content charset of a message (may be None)."""
    return message.get_content_charset()
5986
def get_mailboxes(self):
    """Query the mail database for mailbox names.

    Populates self.connection.mailbox_names with a
    {sanitized table name: native mailbox name} map and returns the
    list of sanitized names. Statically declared names short-circuit
    the server round-trip.
    """
    if self.static_names:
        # statically defined mailbox names
        self.connection.mailbox_names = self.static_names
        return self.static_names.keys()

    listing = self.connection.list()
    self.connection.mailbox_names = dict()
    mailboxes = list()
    for raw in listing[1]:
        raw = raw.strip()
        if "NOSELECT" in raw.upper():
            continue
        parts = [p for p in raw.split("\"") if len(p.strip()) > 0]
        native = parts[-1]
        # sanitize: slashes/spaces -> '_', strip non-word chars,
        # and disallow leading digits/underscores
        name = re.sub('^[_0-9]*', '',
                      re.sub('[^_\w]', '', re.sub('[/ ]', '_', native)))
        mailboxes.append(name)
        self.connection.mailbox_names[name] = native
    return mailboxes
6014
def get_query_mailbox(self, query):
    """Walk a query tree's 'first' chain to find the mailbox table name.

    Returns the tablename of the first Field found, or None when the
    chain ends without one.
    """
    node = query
    while True:
        if not hasattr(node, "first"):
            return None
        node = node.first
        if isinstance(node, Field):
            return node.tablename
        if not isinstance(node, Query):
            return None
6031
def is_flag(self, flag):
    """True when the field name maps to an IMAP system flag."""
    return self.search_fields.get(flag, None) in self.flags
6037
def define_tables(self, mailbox_names=None):
    """
    Auto create common IMAP fields

    This function creates fields definitions "statically"
    meaning that custom fields as in other adapters should
    not be supported and definitions handled on a service/mode
    basis (local syntax for Gmail(r), Ymail(r))

    mailbox_names: optional {tablename: native mailbox name} map;
    when given it avoids the extra server names retrieval.

    Returns a dictionary with tablename, server native mailbox name
    pairs.
    """
    if mailbox_names:
        # optional statically declared mailboxes
        self.static_names = mailbox_names
    else:
        self.static_names = None
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()

    names = self.connection.mailbox_names.keys()

    for name in names:
        self.db.define_table("%s" % name,
            Field("uid", "string", writable=False),
            Field("answered", "boolean"),
            Field("created", "datetime", writable=False),
            Field("content", "list:string", writable=False),
            Field("to", "string", writable=False),
            Field("cc", "string", writable=False),
            Field("bcc", "string", writable=False),
            Field("size", "integer", writable=False),
            Field("deleted", "boolean"),
            Field("draft", "boolean"),
            Field("flagged", "boolean"),
            Field("sender", "string", writable=False),
            Field("recent", "boolean", writable=False),
            Field("seen", "boolean"),
            Field("subject", "string", writable=False),
            Field("mime", "string", writable=False),
            Field("email", "string", writable=False, readable=False),
            Field("attachments", list, writable=False, readable=False),
            Field("encoding")
            )

        # Set a special _mailbox attribute for storing
        # native mailbox names
        self.db[name].mailbox = \
            self.connection.mailbox_names[name]

        # decode quoted printable headers for display
        self.db[name].to.represent = self.db[name].cc.represent = \
            self.db[name].bcc.represent = self.db[name].sender.represent = \
            self.db[name].subject.represent = self.header_represent

    # Set the db instance mailbox collections
    self.db.mailboxes = self.connection.mailbox_names
    return self.db.mailboxes
6096
def create_table(self, *args, **kwargs):
    """No-op: IMAP tables come from mailboxes, nothing is created.

    Present only because the DAL requires every adapter to expose it.
    """
    pass
6101
def _select(self, query, fields, attributes):
    """Return the textual IMAP criterion that .select() would search."""
    if use_common_filters(query):
        mailbox = self.get_query_mailbox(query)
        query = self.common_filter(query, [mailbox,])
    return str(query)
6106
def select(self, query, fields, attributes):
    """ Search and Fetch records and return web2py rows

    Runs an IMAP UID SEARCH for the query's mailbox, fetches each
    matching message (headers only unless content/size/attachments/
    email columns are requested) and maps the results onto the static
    field set as a rows object.
    """
    # move this statement elsewhere (upper-level)
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])

    import email
    # get records from imap server with search + fetch
    # convert results to a dictionary
    tablename = None
    fetch_results = list()

    if isinstance(query, Query):
        tablename = self.get_table(query)
        mailbox = self.connection.mailbox_names.get(tablename, None)
        if mailbox is None:
            raise ValueError("Mailbox name not found: %s" % mailbox)
        else:
            # select with readonly
            result, selected = self.connection.select(mailbox, True)
            if result != "OK":
                raise Exception("IMAP error: %s" % selected)
            self.mailbox_size = int(selected[0])
            search_query = "(%s)" % str(query).strip()
            search_result = self.connection.uid("search", None, search_query)
            # Normal IMAP response OK is assumed (change this)
            if search_result[0] == "OK":
                # For "light" remote server responses just get the first
                # ten records (change for non-experimental implementation)
                # However, light responses are not guaranteed with this
                # approach, just fewer messages.
                limitby = attributes.get('limitby', None)
                messages_set = search_result[1][0].split()
                # descending order
                messages_set.reverse()
                if limitby is not None:
                    # TODO: orderby, asc/desc, limitby from complete message set
                    messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                # keep the requests small for header/flags
                if any([(field.name in ["content", "size",
                                        "attachments", "email"]) for
                       field in fields]):
                    imap_fields = "(RFC822 FLAGS)"
                else:
                    imap_fields = "(RFC822.HEADER FLAGS)"

                if len(messages_set) > 0:
                    # create fetch results object list
                    # fetch each remote message and store it in memory
                    # (change to multi-fetch command syntax for faster
                    # transactions)
                    for uid in messages_set:
                        # fetch the RFC822 message body
                        typ, data = self.connection.uid("fetch", uid, imap_fields)
                        if typ == "OK":
                            fr = {"message": int(data[0][0].split()[0]),
                                  "uid": int(uid),
                                  "email": email.message_from_string(data[0][1]),
                                  "raw_message": data[0][1]}
                            fr["multipart"] = fr["email"].is_multipart()
                            # fetch flags for the message
                            fr["flags"] = self.driver.ParseFlags(data[1])
                            fetch_results.append(fr)
                        else:
                            # error retrieving the message body
                            raise Exception("IMAP error retrieving the body: %s" % data)
            else:
                raise Exception("IMAP search error: %s" % search_result[1])
    elif isinstance(query, (Expression, basestring)):
        raise NotImplementedError()
    else:
        raise TypeError("Unexpected query type")

    imapqry_dict = {}
    imapfields_dict = {}

    # no explicit fields (or SQLALL) means "all static fields"
    if len(fields) == 1 and isinstance(fields[0], SQLALL):
        allfields = True
    elif len(fields) == 0:
        allfields = True
    else:
        allfields = False
    if allfields:
        colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
    else:
        colnames = ["%s.%s" % (tablename, field.name) for field in fields]

    for k in colnames:
        imapfields_dict[k] = k

    imapqry_list = list()
    imapqry_array = list()
    for fr in fetch_results:
        attachments = []
        content = []
        size = 0
        n = int(fr["message"])
        item_dict = dict()
        message = fr["email"]
        uid = fr["uid"]
        charset = self.get_charset(message)
        flags = fr["flags"]
        raw_message = fr["raw_message"]
        # Return messages data mapping static fields
        # and fetched results. Mapping should be made
        # outside the select function (with auxiliary
        # instance methods)

        # pending: search flags states trough the email message
        # instances for correct output

        # preserve subject encoding (ASCII/quoted printable)

        if "%s.id" % tablename in colnames:
            item_dict["%s.id" % tablename] = n
        if "%s.created" % tablename in colnames:
            item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
        if "%s.uid" % tablename in colnames:
            item_dict["%s.uid" % tablename] = uid
        if "%s.sender" % tablename in colnames:
            # If there is no encoding found in the message header
            # force utf-8 replacing characters (change this to
            # module's defaults). Applies to .sender, .to, .cc and .bcc fields
            item_dict["%s.sender" % tablename] = message["From"]
        if "%s.to" % tablename in colnames:
            item_dict["%s.to" % tablename] = message["To"]
        if "%s.cc" % tablename in colnames:
            if "Cc" in message.keys():
                item_dict["%s.cc" % tablename] = message["Cc"]
            else:
                item_dict["%s.cc" % tablename] = ""
        if "%s.bcc" % tablename in colnames:
            if "Bcc" in message.keys():
                item_dict["%s.bcc" % tablename] = message["Bcc"]
            else:
                item_dict["%s.bcc" % tablename] = ""
        if "%s.deleted" % tablename in colnames:
            item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
        if "%s.draft" % tablename in colnames:
            item_dict["%s.draft" % tablename] = "\\Draft" in flags
        if "%s.flagged" % tablename in colnames:
            item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
        if "%s.recent" % tablename in colnames:
            item_dict["%s.recent" % tablename] = "\\Recent" in flags
        if "%s.seen" % tablename in colnames:
            item_dict["%s.seen" % tablename] = "\\Seen" in flags
        if "%s.subject" % tablename in colnames:
            item_dict["%s.subject" % tablename] = message["Subject"]
        if "%s.answered" % tablename in colnames:
            item_dict["%s.answered" % tablename] = "\\Answered" in flags
        if "%s.mime" % tablename in colnames:
            item_dict["%s.mime" % tablename] = message.get_content_type()
        if "%s.encoding" % tablename in colnames:
            item_dict["%s.encoding" % tablename] = charset

        # Here goes the whole RFC822 body as an email instance
        # for controller side custom processing
        # The message is stored as a raw string
        # >> email.message_from_string(raw string)
        # returns a Message object for enhanced object processing
        if "%s.email" % tablename in colnames:
            # WARNING: no encoding performed (raw message)
            item_dict["%s.email" % tablename] = raw_message

        # Size measure as suggested in a Velocity Reviews post
        # by Tim Williams: "how to get size of email attachment"
        # Note: len() and server RFC822.SIZE reports doesn't match
        # To retrieve the server size for representation would add a new
        # fetch transaction to the process
        for part in message.walk():
            maintype = part.get_content_maintype()
            if ("%s.attachments" % tablename in colnames) or \
               ("%s.content" % tablename in colnames):
                if "%s.attachments" % tablename in colnames:
                    if not ("text" in maintype):
                        payload = part.get_payload(decode=True)
                        if payload:
                            attachment = {
                                "payload": payload,
                                "filename": part.get_filename(),
                                "encoding": part.get_content_charset(),
                                "mime": part.get_content_type(),
                                "disposition": part["Content-Disposition"]}
                            attachments.append(attachment)
                if "%s.content" % tablename in colnames:
                    payload = part.get_payload(decode=True)
                    part_charset = self.get_charset(part)
                    if "text" in maintype:
                        if payload:
                            content.append(self.encode_text(payload, part_charset))
            if "%s.size" % tablename in colnames:
                if part is not None:
                    size += len(str(part))
        item_dict["%s.content" % tablename] = bar_encode(content)
        item_dict["%s.attachments" % tablename] = attachments
        item_dict["%s.size" % tablename] = size
        imapqry_list.append(item_dict)

    # extra object mapping for the sake of rows object
    # creation (sends an array or lists)
    for item_dict in imapqry_list:
        imapqry_array_item = list()
        for fieldname in colnames:
            imapqry_array_item.append(item_dict[fieldname])
        imapqry_array.append(imapqry_array_item)

    # parse result and return a rows object
    colnames = colnames
    processor = attributes.get('processor',self.parse)
    return processor(imapqry_array, fields, colnames)
6319
def _update(self, tablename, query, fields, commit=False):
    """Build the IMAP STORE commands that .update() will execute.

    Only system-flag fields can be updated (\\Recent excluded);
    returns a list of (sequence_number, "+FLAGS"/"-FLAGS", "(flags)")
    tuples, one or two per matching message.
    """
    # TODO: the adapter should implement an .expand method
    commands = list()
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    mark = []
    unmark = []
    if query:
        # split requested flag values into set (+FLAGS) / clear (-FLAGS)
        for item in fields:
            field = item[0]
            name = field.name
            value = item[1]
            if self.is_flag(name):
                flag = self.search_fields[name]
                if (value is not None) and (flag != "\\Recent"):
                    if value:
                        mark.append(flag)
                    else:
                        unmark.append(flag)
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        store_list = [item.strip() for item in data[0].split()
                      if item.strip().isdigit()]
        # build commands for marked flags
        for number in store_list:
            result = None
            if len(mark) > 0:
                commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
            if len(unmark) > 0:
                commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
    return commands
6353
def update(self, tablename, query, fields):
    """Execute the STORE commands built by _update; returns rows touched."""
    rowcount = 0
    for command in self._update(tablename, query, fields):
        result, data = self.connection.store(*command)
        if result != "OK":
            raise Exception("IMAP storing error: %s" % data)
        rowcount += 1
    return rowcount
6364
6365 - def _count(self, query, distinct=None):
6366 raise NotImplementedError()
6367
def count(self,query,distinct=None):
    """Count messages matching the query in its mailbox (0 otherwise)."""
    tablename = self.get_query_mailbox(query)
    if not query or tablename is None:
        return 0
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    result, data = self.connection.select(
        self.connection.mailbox_names[tablename])
    string_query = "(%s)" % query
    result, data = self.connection.search(None, string_query)
    matching = [item.strip() for item in data[0].split()
                if item.strip().isdigit()]
    return len(matching)
6380
def delete(self, tablename, query):
    """Flag matching messages \\Deleted and expunge; returns the count."""
    counter = 0
    if query:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        numbers = [item.strip() for item in data[0].split()
                   if item.strip().isdigit()]
        for number in numbers:
            result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
            if result != "OK":
                raise Exception("IMAP store error: %s" % data)
            counter += 1
        if counter > 0:
            result, data = self.connection.expunge()
    return counter
6399
6400 - def BELONGS(self, first, second):
6401 result = None 6402 name = self.search_fields[first.name] 6403 if name == "MESSAGE": 6404 values = [str(val) for val in second if str(val).isdigit()] 6405 result = "%s" % ",".join(values).strip() 6406 6407 elif name == "UID": 6408 values = [str(val) for val in second if str(val).isdigit()] 6409 result = "UID %s" % ",".join(values).strip() 6410 6411 else: 6412 raise Exception("Operation not supported") 6413 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6414 return result
6415
6416 - def CONTAINS(self, first, second, case_sensitive=False):
6417 # silently ignore, only case sensitive 6418 result = None 6419 name = self.search_fields[first.name] 6420 6421 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6422 result = "%s \"%s\"" % (name, self.expand(second)) 6423 else: 6424 if first.name in ("cc", "bcc"): 6425 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6426 elif first.name == "mime": 6427 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6428 else: 6429 raise Exception("Operation not supported") 6430 return result
6431
6432 - def GT(self, first, second):
6433 result = None 6434 name = self.search_fields[first.name] 6435 if name == "MESSAGE": 6436 last_message = self.get_last_message(first.tablename) 6437 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6438 elif name == "UID": 6439 # GT and LT may not return 6440 # expected sets depending on 6441 # the uid format implemented 6442 try: 6443 pedestal, threshold = self.get_uid_bounds(first.tablename) 6444 except TypeError: 6445 e = sys.exc_info()[1] 6446 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6447 return "" 6448 try: 6449 lower_limit = int(self.expand(second)) + 1 6450 except (ValueError, TypeError): 6451 e = sys.exc_info()[1] 6452 raise Exception("Operation not supported (non integer UID)") 6453 result = "UID %s:%s" % (lower_limit, threshold) 6454 elif name == "DATE": 6455 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6456 elif name == "SIZE": 6457 result = "LARGER %s" % self.expand(second) 6458 else: 6459 raise Exception("Operation not supported") 6460 return result
6461
6462 - def GE(self, first, second):
6463 result = None 6464 name = self.search_fields[first.name] 6465 if name == "MESSAGE": 6466 last_message = self.get_last_message(first.tablename) 6467 result = "%s:%s" % (self.expand(second), last_message) 6468 elif name == "UID": 6469 # GT and LT may not return 6470 # expected sets depending on 6471 # the uid format implemented 6472 try: 6473 pedestal, threshold = self.get_uid_bounds(first.tablename) 6474 except TypeError: 6475 e = sys.exc_info()[1] 6476 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6477 return "" 6478 lower_limit = self.expand(second) 6479 result = "UID %s:%s" % (lower_limit, threshold) 6480 elif name == "DATE": 6481 result = "SINCE %s" % self.convert_date(second) 6482 else: 6483 raise Exception("Operation not supported") 6484 return result
6485
6486 - def LT(self, first, second):
6487 result = None 6488 name = self.search_fields[first.name] 6489 if name == "MESSAGE": 6490 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6491 elif name == "UID": 6492 try: 6493 pedestal, threshold = self.get_uid_bounds(first.tablename) 6494 except TypeError: 6495 e = sys.exc_info()[1] 6496 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6497 return "" 6498 try: 6499 upper_limit = int(self.expand(second)) - 1 6500 except (ValueError, TypeError): 6501 e = sys.exc_info()[1] 6502 raise Exception("Operation not supported (non integer UID)") 6503 result = "UID %s:%s" % (pedestal, upper_limit) 6504 elif name == "DATE": 6505 result = "BEFORE %s" % self.convert_date(second) 6506 elif name == "SIZE": 6507 result = "SMALLER %s" % self.expand(second) 6508 else: 6509 raise Exception("Operation not supported") 6510 return result
6511
6512 - def LE(self, first, second):
6513 result = None 6514 name = self.search_fields[first.name] 6515 if name == "MESSAGE": 6516 result = "%s:%s" % (1, self.expand(second)) 6517 elif name == "UID": 6518 try: 6519 pedestal, threshold = self.get_uid_bounds(first.tablename) 6520 except TypeError: 6521 e = sys.exc_info()[1] 6522 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6523 return "" 6524 upper_limit = int(self.expand(second)) 6525 result = "UID %s:%s" % (pedestal, upper_limit) 6526 elif name == "DATE": 6527 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6528 else: 6529 raise Exception("Operation not supported") 6530 return result
6531
6532 - def NE(self, first, second=None):
6533 if (second is None) and isinstance(first, Field): 6534 # All records special table query 6535 if first.type == "id": 6536 return self.GE(first, 1) 6537 result = self.NOT(self.EQ(first, second)) 6538 result = result.replace("NOT NOT", "").strip() 6539 return result
6540
6541 - def EQ(self,first,second):
6542 name = self.search_fields[first.name] 6543 result = None 6544 if name is not None: 6545 if name == "MESSAGE": 6546 # query by message sequence number 6547 result = "%s" % self.expand(second) 6548 elif name == "UID": 6549 result = "UID %s" % self.expand(second) 6550 elif name == "DATE": 6551 result = "ON %s" % self.convert_date(second) 6552 6553 elif name in self.flags: 6554 if second: 6555 result = "%s" % (name.upper()[1:]) 6556 else: 6557 result = "NOT %s" % (name.upper()[1:]) 6558 else: 6559 raise Exception("Operation not supported") 6560 else: 6561 raise Exception("Operation not supported") 6562 return result
6563
6564 - def AND(self, first, second):
6565 result = "%s %s" % (self.expand(first), self.expand(second)) 6566 return result
6567
6568 - def OR(self, first, second):
6569 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6570 return "%s" % result.replace("OR OR", "OR")
6571
6572 - def NOT(self, first):
6573 result = "NOT %s" % self.expand(first) 6574 return result
6575 6576 ######################################################################## 6577 # end of adapters 6578 ######################################################################## 6579 6580 ADAPTERS = { 6581 'sqlite': SQLiteAdapter, 6582 'spatialite': SpatiaLiteAdapter, 6583 'sqlite:memory': SQLiteAdapter, 6584 'spatialite:memory': SpatiaLiteAdapter, 6585 'mysql': MySQLAdapter, 6586 'postgres': PostgreSQLAdapter, 6587 'postgres:psycopg2': PostgreSQLAdapter, 6588 'postgres:pg8000': PostgreSQLAdapter, 6589 'postgres2:psycopg2': NewPostgreSQLAdapter, 6590 'postgres2:pg8000': NewPostgreSQLAdapter, 6591 'oracle': OracleAdapter, 6592 'mssql': MSSQLAdapter, 6593 'mssql2': MSSQL2Adapter, 6594 'mssql3': MSSQL3Adapter, 6595 'sybase': SybaseAdapter, 6596 'db2': DB2Adapter, 6597 'teradata': TeradataAdapter, 6598 'informix': InformixAdapter, 6599 'informix-se': InformixSEAdapter, 6600 'firebird': FireBirdAdapter, 6601 'firebird_embedded': FireBirdAdapter, 6602 'ingres': IngresAdapter, 6603 'ingresu': IngresUnicodeAdapter, 6604 'sapdb': SAPDBAdapter, 6605 'cubrid': CubridAdapter, 6606 'jdbc:sqlite': JDBCSQLiteAdapter, 6607 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6608 'jdbc:postgres': JDBCPostgreSQLAdapter, 6609 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6610 'google:datastore': GoogleDatastoreAdapter, 6611 'google:sql': GoogleSQLAdapter, 6612 'couchdb': CouchDBAdapter, 6613 'mongodb': MongoDBAdapter, 6614 'imap': IMAPAdapter 6615 }
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype
    """
    db = field.db
    # validators are optional; without them no validation is attached
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # ff(r, id): render a referenced record via the referenced table's
    # _format (string template or callable), falling back to the raw id
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    # 'reference <table>' (no dotted field part) where the table is defined:
    # field_type[10:] strips the 'reference ' prefix
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            # self-reference must allow empty, or no first record could exist
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    # 'list:reference <table>': field_type[15:] strips 'list:reference '
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items per query; batch and merge
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff: two-letter prefixes of types whose empty value is meaningful
    # (integer, double, date, time, datetime, decimal, boolean, bigint)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape literal '|' characters in item's string form by doubling them."""
    text = str(item)
    return text.replace('|', '||')
6718
def bar_encode(items):
    """Serialize items as a '|'-delimited string, skipping blank entries."""
    body = '|'.join(bar_escape(item) for item in items if str(item).strip())
    return '|%s|' % body
6721
def bar_decode_integer(value):
    """Parse a '|'-delimited string (or readable blob) into a list of ints."""
    if not hasattr(value, 'split') and hasattr(value, 'read'):
        # e.g. a buffer/LOB object returned by some DB drivers
        value = value.read()
    return [int(chunk) for chunk in value.split('|') if chunk.strip()]
6726
def bar_decode_string(value):
    """Split a bar-encoded string back into items, undoing '||' escapes."""
    inner = value[1:-1]
    return [chunk.replace('||', '|')
            for chunk in REGEX_UNPACK.split(inner) if chunk.strip()]
6730
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row

    Attribute-style and item-style access are interchangeable; values are
    stored directly in the instance __dict__.
    """

    def __init__(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def __getitem__(self, key):
        # accept both 'field' and 'table.field' keys; '_extra' holds
        # non-field columns (e.g. expressions) selected alongside the row
        key = str(key)
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if key in self.get('_extra', {}):
            return self._extra[key]
        elif m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError, AttributeError, TypeError):
                # fall back to treating the field part as a plain key
                key = m.group(2)
        return ogetattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, str(key), value)

    __delitem__ = delattr

    __call__ = __getitem__

    # BUGFIX: removed the earlier dead binding
    # ``__copy__ = lambda self: Row(self)`` -- it was silently overridden
    # by the ``def __copy__`` further down, so it never executed.

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

    def __contains__(self, key):
        return key in self.__dict__

    has_key = __contains__

    def __nonzero__(self):
        return len(self.__dict__) > 0

    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __iter__(self):
        return self.__dict__.__iter__()

    def iteritems(self):
        return self.__dict__.iteritems()

    def __str__(self):
        ### this could be made smarter
        return '<Row %s>' % self.as_dict()

    def __repr__(self):
        return '<Row %s>' % self.as_dict()

    def __int__(self):
        return object.__getattribute__(self, 'id')

    def __eq__(self, other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy; values of non-serializable types are
        dropped, References become ints, Decimals become floats, and
        date/time values are optionally stringified."""
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types, (list, tuple, set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v = d[k]
            if d[k] is None:
                continue
            elif isinstance(v, Row):
                d[k] = v.as_dict()
            elif isinstance(v, Reference):
                d[k] = int(v)
            elif isinstance(v, decimal.Decimal):
                d[k] = float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T', ' ')[:19]
            elif not isinstance(v, tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row (recursively) as an XML fragment."""
        def f(row, field, indent='  '):
            if isinstance(row, Row):
                spc = indent+'  \n'
                items = [f(row[x], x, indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent, field, row, field)
                else:
                    # names that are not valid XML tags go into <extra>
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent, field, row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the table to a JSON list of objects
        kwargs are passed to .as_dict method
        only "object" mode supported for single row

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order
        """

        def inner_loop(record, col):
            (t, f) = col.split('.')
            res = None
            if not REGEX_TABLE_DOT_FIELD.match(col):
                key = col
                res = record._extra[col]
            else:
                key = f
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (key, res)
            else:
                return res

        # multi: row groups several table sub-rows (e.g. from a join)
        multi = any([isinstance(v, self.__class__) for v in self.values()])
        mode = mode.lower()
        if not mode in ['object', 'array']:
            raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

        if mode == 'object' and colnames:
            item = dict([inner_loop(self, col) for col in colnames])
        elif colnames:
            item = [inner_loop(self, col) for col in colnames]
        else:
            if not mode == 'object':
                raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

            if multi:
                item = dict()
                [item.update(**v.as_dict(**kwargs)) for v in self.values()]
            else:
                item = self.as_dict(**kwargs)

        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    # A list subclass used e.g. for db.tables: calling the instance
    # (db.tables()) returns a shallow copy, so callers can safely mutate
    # the result without affecting the DAL's internal list.
    def __call__(self):
        return copy.copy(self)
6927
def smart_query(fields,text):
    """
    Build a DAL Query from a free-form textual search expression.

    ``fields`` is a Field, Table, or list of them; ``text`` is an
    expression such as ``"name contains 'John' and age greater than 18"``.
    English operator phrases and symbolic operators are normalized, quoted
    strings are protected as constants, and the tokens are parsed into a
    chain of field comparisons combined with and/or/not.
    Raises RuntimeError on invalid fields, syntax or operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            # a Table contributes all of its fields
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'fieldname' and 'table.fieldname' (lowercased) to the Field
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # replace quoted string constants with '#<i>' placeholders so the
    # operator rewriting below cannot touch their contents
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize symbolic and English operators to a canonical token set
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also accept the phrase prefixed by 'is', e.g. 'is equal to'
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # glue split two-character operators back together, e.g. '< =' -> '<='
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine: expect field, then operator, then value
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore a protected string constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset the state machine for the next clause
            field = op = neg = logic = None
    return query
7045
7046 -class DAL(object):
7047 7048 """ 7049 an instance of this class represents a database connection 7050 7051 Example:: 7052 7053 db = DAL('sqlite://test.db') 7054 db.define_table('tablename', Field('fieldname1'), 7055 Field('fieldname2')) 7056 7057 (experimental) 7058 you can pass a dict object as uri with the uri string 7059 and table/field definitions. For an example of valid data check 7060 the output of: 7061 7062 >>> db.as_dict(flat=True, sanitize=False) 7063 """ 7064
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        """
        Reuse or create the thread-local DAL instance identified by db_uid.

        Instances are grouped per-thread by db_uid (an md5 of the uri unless
        given explicitly). The special uri '<zombie>' retrieves an already
        existing instance (or registers a placeholder) without connecting.
        """
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                db_group = THREAD_LOCAL.db_instances[db_uid]
                # reuse the most recently created instance of this group
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # a zombie placeholder exists for this uid: promote it
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """
        Set the folder where migration (.table) files live, for all adapters
        on this thread.

        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        BaseAdapter.set_folder(folder)
    @staticmethod
    def get_instances():
        """
        Returns a dictionary with uri as key with timings and defined tables
        {'sqlite://storage.sqlite': {
            'dbstats': [(select auth_user.email from auth_user, 0.02009)],
            'dbtables': {
                'defined': ['auth_cas', 'auth_event', 'auth_group',
                            'auth_membership', 'auth_permission', 'auth_user'],
                'lazy': '[]'
                }
            }
        }
        """
        # thread-local registry populated by DAL.__new__
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        infos = {}
        for db_uid, db_group in dbs:
            for db in db_group:
                if not db._uri:
                    continue
                # never expose credentials in the returned keys
                k = hide_password(db._uri)
                infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                                dbtables = {'defined':
                                    sorted(list(set(db.tables) -
                                                set(db._LAZY_TABLES.keys()))),
                                    'lazy': sorted(db._LAZY_TABLES.keys())}
                                )
        return infos
7129 7130 @staticmethod
7131 - def distributed_transaction_begin(*instances):
7132 if not instances: 7133 return 7134 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7135 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7136 instances = enumerate(instances) 7137 for (i, db) in instances: 7138 if not db._adapter.support_distributed_transaction(): 7139 raise SyntaxError( 7140 'distributed transaction not suported by %s' % db._dbname) 7141 for (i, db) in instances: 7142 db._adapter.distributed_transaction_begin(keys[i])
7143 7144 @staticmethod
7145 - def distributed_transaction_commit(*instances):
7146 if not instances: 7147 return 7148 instances = enumerate(instances) 7149 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7150 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7151 for (i, db) in instances: 7152 if not db._adapter.support_distributed_transaction(): 7153 raise SyntaxError( 7154 'distributed transaction not suported by %s' % db._dbanme) 7155 try: 7156 for (i, db) in instances: 7157 db._adapter.prepare(keys[i]) 7158 except: 7159 for (i, db) in instances: 7160 db._adapter.rollback_prepared(keys[i]) 7161 raise RuntimeError('failure to commit distributed transaction') 7162 else: 7163 for (i, db) in instances: 7164 db._adapter.commit_prepared(keys[i]) 7165 return
7166
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False,debug=False,lazy_tables=False,
                 db_uid=None, do_connect=True, after_connection=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
            (default: 'sqlite://dummy.db')
        :pool_size: How many open connections to make to the database object.
        :folder: where .table files will be created.
            automatically set within web2py
            use an explicit path when using DAL outside web2py
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
            against sql/nosql reserved keywords. (Default None)

            * 'common' List of sql keywords that are common to all database types
                such as "SELECT, INSERT". (recommended)
            * 'all' Checks against all known SQL keywords. (not recommended)
                <adaptername> Checks against the specific adapters list of keywords
                (recommended)
            * '<adaptername>_nonreserved' Checks against the specific adapters
                list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, import automatically table
            definitions from the databases folder
        :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
        :lazy_tables (defaults to False): delay table definition until table access
        :after_connection (defaults to None): a callable that will be execute after the connection
        """

        # (experimental) uri may be a dict carrying the uri string plus
        # table/field definitions under "items"
        items = None
        if isinstance(uri, dict):
            if "items" in uri:
                items = uri.pop("items")
            try:
                newuri = uri.pop("uri")
            except KeyError:
                newuri = DEFAULT_URI
            # NOTE(review): locals().update(uri) does not rebind the local
            # keyword arguments in CPython, so remaining dict entries appear
            # to have no effect -- confirm intended behavior
            locals().update(uri)
            uri = newuri

        # a zombie instance was fully initialized in a previous request
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._do_connect = do_connect

        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # uri may be a failover list/tuple of alternative URIs
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # configuration errors must not be retried
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    # wait before the next round of attempts
                    time.sleep(1)
            if not connected:
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy adapter, no migrations possible
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or items:
            self.import_table_definitions(adapter.folder,
                                          items=items)
    @property
    def tables(self):
        # read-only accessor for the defined table names; the returned
        # SQLCallableList yields a copy when called (db.tables())
        return self._tables
7319
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        """
        Define tables from previously saved definitions.

        If ``items`` (a dict of table definitions) is given it is used
        directly; otherwise the pickled ``<urihash>_*.table`` migration
        files under ``path`` are loaded.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if items:
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                                                    "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    # NOTE(review): pickle.load on .table files -- these are
                    # written by the DAL itself, but loading untrusted files
                    # from this folder would execute arbitrary code
                    sql_fields = pickle.load(tfile)
                    # slice the table name out of '<urihash>_<name>.table'
                    name = filename[len(pattern)-7:-6]
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                          for key, value in sql_fields.iteritems()]
                    # restore the original field order via 'sortable'
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7356
7357 - def check_reserved_keyword(self, name):
7358 """ 7359 Validates ``name`` against SQL keywords 7360 Uses self.check_reserve which is a list of 7361 operators to use. 7362 self.check_reserved 7363 ['common', 'postgres', 'mysql'] 7364 self.check_reserved 7365 ['all'] 7366 """ 7367 for backend in self.check_reserved: 7368 if name.upper() in self.RSK[backend]: 7369 raise SyntaxError( 7370 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7371
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Match a RESTful URL (``args``/``vars``) against ``patterns`` and
        build/execute the corresponding query; returns a Row with keys
        ``status``, ``error``, ``response`` and ``pattern``.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN      # matches '{table.field[.op[.not]]}'
        re2 = REGEX_SQUARE_BRACKETS     # matches 'name[link]'

        def auto_table(table,base='',depth=0):
            # generate the standard pattern set for every readable field
            # of ``table``, recursing ``depth`` levels into back-references
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields are exposed as a half-open range
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
            if depth>0:
                # NOTE(review): ``tag`` leaks from the loop above, so these
                # link patterns extend the last field's tag — confirm intended
                for f in db[table]._referenced_by:
                    tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                    patterns.append(tag)
                    patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[table]' placeholders in-place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection URL: report the active pattern list
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' tag: add a filter to the set
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # a 4th token may only be 'not' (negates the filter)
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' tag: follow a relation
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            # fall back to two queries when the backend
                            # cannot nest selects
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' tag: return the values of one column
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = int(vars.get('offset',None) or 0)
                        limits = (offset,int(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal path component mismatch: try next pattern
                    break
                otable = table
                i += 1
            if i==len(tags) and table:
                # full pattern matched: select and return the records
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = int(vars.get('offset',None) or 0)
                    limits = (offset,int(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7625
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Define (or, with ``redefine=True``, replace) a table on this DAL.

        Validates the table name, then either records the definition for
        lazy instantiation (when lazy_tables is enabled) or builds the
        Table immediately via ``lazy_define_table``.

        :returns: the Table, or None when the definition was deferred.
        :raises SyntaxError: on an invalid/duplicate name or bad table args.
        """
        if not isinstance(tablename,str):
            raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE(review): unknown TABLE_ARGS are only rejected in this
            # final branch (e.g. not when check_reserved is set) — confirm
            # this elif-chain ordering is intentional
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer: store the recipe, materialized on first attribute access
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7655
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Actually construct a Table (called by define_table, or on first
        access when lazy_tables is enabled), resolve its references, assign
        default validators and run the migration machinery.

        :returns: the constructed Table instance.
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            # fields shared by all tables of this DAL are appended
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize DDL across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
7696
    def as_dict(self, flat=False, sanitize=True, field_options=True):
        """
        Serialize this database's definition to a plain dict.

        :param flat: forwarded to Table.as_dict
        :param sanitize: when True omit uri/dbname/db_uid (may hold credentials)
        :param field_options: forwarded to Table.as_dict
        """
        dbname = db_uid = uri = None
        if not sanitize:
            uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid)
        # NOTE: the unparenthesized tuple after 'for k in' below is
        # Python 2-only comprehension syntax
        db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname,
                          db_uid=db_uid,
                          **dict([(k, getattr(self, "_" + k)) for
                                  k in 'pool_size','folder','db_codec',
                                  'check_reserved','migrate','fake_migrate',
                                  'migrate_enabled','fake_migrate_all',
                                  'decode_credentials','driver_args',
                                  'adapter_args', 'attempts',
                                  'bigint_id','debug','lazy_tables',
                                  'do_connect']))

        for table in self:
            tablename = str(table)
            db_as_dict["tables"].append(tablename)
            db_as_dict["items"][tablename] = table.as_dict(flat=flat,
                                                sanitize=sanitize,
                                                field_options=field_options)
        return db_as_dict
7719
7720 - def as_xml(self, sanitize=True, field_options=True):
7721 if not have_serializers: 7722 raise ImportError("No xml serializers available") 7723 d = self.as_dict(flat=True, sanitize=sanitize, 7724 field_options=field_options) 7725 return serializers.xml(d)
7726
7727 - def as_json(self, sanitize=True, field_options=True):
7728 if not have_serializers: 7729 raise ImportError("No json serializers available") 7730 d = self.as_dict(flat=True, sanitize=sanitize, 7731 field_options=field_options) 7732 return serializers.json(d)
7733
7734 - def as_yaml(self, sanitize=True, field_options=True):
7735 if not have_serializers: 7736 raise ImportError("No YAML serializers available") 7737 d = self.as_dict(flat=True, sanitize=sanitize, 7738 field_options=field_options) 7739 return serializers.yaml(d)
7740
7741 - def __contains__(self, tablename):
7742 try: 7743 return tablename in self.tables 7744 except AttributeError: 7745 # The instance has no .tables attribute yet 7746 return False
7747 7748 has_key = __contains__ 7749
7750 - def get(self,key,default=None):
7751 return self.__dict__.get(key,default)
7752
7753 - def __iter__(self):
7754 for tablename in self.tables: 7755 yield self[tablename]
7756
7757 - def __getitem__(self, key):
7758 return self.__getattr__(str(key))
7759
7760 - def __getattr__(self, key):
7761 if ogetattr(self,'_lazy_tables') and \ 7762 key in ogetattr(self,'_LAZY_TABLES'): 7763 tablename, fields, args = self._LAZY_TABLES.pop(key) 7764 return self.lazy_define_table(tablename,*fields,**args) 7765 return ogetattr(self, key)
7766
7767 - def __setitem__(self, key, value):
7768 osetattr(self, str(key), value)
7769
7770 - def __setattr__(self, key, value):
7771 if key[:1]!='_' and key in self: 7772 raise SyntaxError( 7773 'Object %s exists and cannot be redefined' % key) 7774 osetattr(self,key,value)
7775 7776 __delitem__ = object.__delattr__ 7777
7778 - def __repr__(self):
7779 if hasattr(self,'_uri'): 7780 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7781 else: 7782 return '<DAL db_uid="%s">' % self._db_uid
7783
7784 - def smart_query(self,fields,text):
7785 return Set(self, smart_query(fields,text))
7786
7787 - def __call__(self, query=None, ignore_common_filters=None):
7788 if isinstance(query,Table): 7789 query = self._adapter.id_query(query) 7790 elif isinstance(query,Field): 7791 query = query!=None 7792 elif isinstance(query, dict): 7793 icf = query.get("ignore_common_filters") 7794 if icf: ignore_common_filters = icf 7795 return Set(self, query, ignore_common_filters=ignore_common_filters)
7796
7797 - def commit(self):
7798 self._adapter.commit()
7799
7800 - def rollback(self):
7801 self._adapter.rollback()
7802
7803 - def close(self):
7804 self._adapter.close() 7805 if self._db_uid in THREAD_LOCAL.db_instances: 7806 db_group = THREAD_LOCAL.db_instances[self._db_uid] 7807 db_group.remove(self) 7808 if not db_group: 7809 del THREAD_LOCAL.db_instances[self._db_uid]
7810
7811 - def executesql(self, query, placeholders=None, as_dict=False, 7812 fields=None, colnames=None):
7813 """ 7814 placeholders is optional and will always be None. 7815 If using raw SQL with placeholders, placeholders may be 7816 a sequence of values to be substituted in 7817 or, (if supported by the DB driver), a dictionary with keys 7818 matching named placeholders in your SQL. 7819 7820 Added 2009-12-05 "as_dict" optional argument. Will always be 7821 None when using DAL. If using raw SQL can be set to True 7822 and the results cursor returned by the DB driver will be 7823 converted to a sequence of dictionaries keyed with the db 7824 field names. Tested with SQLite but should work with any database 7825 since the cursor.description used to get field names is part of the 7826 Python dbi 2.0 specs. Results returned with as_dict=True are 7827 the same as those returned when applying .to_list() to a DAL query. 7828 7829 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7830 7831 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7832 is provided, the results cursor returned by the DB driver will be 7833 converted to a DAL Rows object using the db._adapter.parse() method. 7834 7835 The "fields" argument is a list of DAL Field objects that match the 7836 fields returned from the DB. The Field objects should be part of one or 7837 more Table objects defined on the DAL object. The "fields" list can 7838 include one or more DAL Table objects in addition to or instead of 7839 including Field objects, or it can be just a single table (not in a 7840 list). In that case, the Field objects will be extracted from the 7841 table(s). 7842 7843 Instead of specifying the "fields" argument, the "colnames" argument 7844 can be specified as a list of field names in tablename.fieldname format. 7845 Again, these should represent tables and fields defined on the DAL 7846 object. 7847 7848 It is also possible to specify both "fields" and the associated 7849 "colnames". 
In that case, "fields" can also include DAL Expression 7850 objects in addition to Field objects. For Field objects in "fields", 7851 the associated "colnames" must still be in tablename.fieldname format. 7852 For Expression objects in "fields", the associated "colnames" can 7853 be any arbitrary labels. 7854 7855 Note, the DAL Table objects referred to by "fields" or "colnames" can 7856 be dummy tables and do not have to represent any real tables in the 7857 database. Also, note that the "fields" and "colnames" must be in the 7858 same order as the fields in the results cursor returned from the DB. 7859 """ 7860 adapter = self._adapter 7861 if placeholders: 7862 adapter.execute(query, placeholders) 7863 else: 7864 adapter.execute(query) 7865 if as_dict: 7866 if not hasattr(adapter.cursor,'description'): 7867 raise RuntimeError("database does not support executesql(...,as_dict=True)") 7868 # Non-DAL legacy db query, converts cursor results to dict. 7869 # sequence of 7-item sequences. each sequence tells about a column. 7870 # first item is always the field name according to Python Database API specs 7871 columns = adapter.cursor.description 7872 # reduce the column info down to just the field names 7873 fields = [f[0] for f in columns] 7874 # will hold our finished resultset in a list 7875 data = adapter._fetchall() 7876 # convert the list for each row into a dictionary so it's 7877 # easier to work with. 
row['field_name'] rather than row[0] 7878 return [dict(zip(fields,row)) for row in data] 7879 try: 7880 data = adapter._fetchall() 7881 except: 7882 return None 7883 if fields or colnames: 7884 fields = [] if fields is None else fields 7885 if not isinstance(fields, list): 7886 fields = [fields] 7887 extracted_fields = [] 7888 for field in fields: 7889 if isinstance(field, Table): 7890 extracted_fields.extend([f for f in field]) 7891 else: 7892 extracted_fields.append(field) 7893 if not colnames: 7894 colnames = ['%s.%s' % (f.tablename, f.name) 7895 for f in extracted_fields] 7896 data = adapter.parse( 7897 data, fields=extracted_fields, colnames=colnames) 7898 return data
7899
7900 - def _remove_references_to(self, thistable):
7901 for table in self: 7902 table._referenced_by = [field for field in table._referenced_by 7903 if not field.table==thistable]
7904
7905 - def export_to_csv_file(self, ofile, *args, **kwargs):
7906 step = int(kwargs.get('max_fetch_rows,',500)) 7907 write_colnames = kwargs['write_colnames'] = \ 7908 kwargs.get("write_colnames", True) 7909 for table in self.tables: 7910 ofile.write('TABLE %s\r\n' % table) 7911 query = self._adapter.id_query(self[table]) 7912 nrows = self(query).count() 7913 kwargs['write_colnames'] = write_colnames 7914 for k in range(0,nrows,step): 7915 self(query).select(limitby=(k,k+step)).export_to_csv_file( 7916 ofile, *args, **kwargs) 7917 kwargs['write_colnames'] = False 7918 ofile.write('\r\n\r\n') 7919 ofile.write('END')
7920
7921 - def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', 7922 unique='uuid', *args, **kwargs):
7923 #if id_map is None: id_map={} 7924 id_offset = {} # only used if id_map is None 7925 for line in ifile: 7926 line = line.strip() 7927 if not line: 7928 continue 7929 elif line == 'END': 7930 return 7931 elif not line.startswith('TABLE ') or not line[6:] in self.tables: 7932 raise SyntaxError('invalid file format') 7933 else: 7934 tablename = line[6:] 7935 self[tablename].import_from_csv_file( 7936 ifile, id_map, null, unique, id_offset, *args, **kwargs)
7937
def DAL_unpickler(db_uid):
    """Recreate a zombie DAL bound to ``db_uid`` during unpickling."""
    return DAL('<zombie>', db_uid=db_uid)
7940
def DAL_pickler(db):
    """Reduce a DAL to its db_uid; unpickling yields a zombie DAL."""
    return DAL_unpickler, (db._db_uid,)

copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        field_names = [str(field) for field in self._table]
        return ', '.join(field_names)
7959
# class Reference(int):
class Reference(long):
    """
    Integer subclass holding a record id; the referenced record is fetched
    lazily from self._table on first attribute/item access and cached in
    self._record.
    """

    def __allocate(self):
        # fetch and cache the referenced record; fail loudly if it is gone
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self)))

    def __getattr__(self, key, default=None):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, default)

    def get(self, key, default=None):
        # BUGFIX: this used to call __getattr__(key, default) while
        # __getattr__ accepted no default argument, so every call raised
        # TypeError; __getattr__ now takes an optional default
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # private attributes are stored on the instance itself;
        # anything else writes through to the referenced record
        if key.startswith('_'):
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
7995
def Reference_unpickler(data):
    """Rebuild the pickled integer id from its marshal dump."""
    return marshal.loads(data)
7999
def Reference_pickler(data):
    """Reduce a Reference to (Reference_unpickler, (marshal dump of its id,))."""
    value = int(data)
    try:
        marshal_dump = marshal.dumps(value)
    except AttributeError:
        # marshal.dumps missing: emulate the 'i' (32-bit int) format
        marshal_dump = 'i%s' % struct.pack('<i', value)
    return (Reference_unpickler, (marshal_dump,))

copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
8008 8009 8010 -class Table(object):
8011 8012 """ 8013 an instance of this class represents a database table 8014 8015 Example:: 8016 8017 db = DAL(...) 8018 db.define_table('users', Field('name')) 8019 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8020 db.users.drop() 8021 """ 8022
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type,
            a field name is reserved/duplicated, or the primarykey
            arguments are inconsistent.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._sequence_name = args.get('sequence_name',None) or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name',None) or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter', None)
        self._format = args.get('format',None)
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible, but kept for backward compatibility of appadmin:
        # _primarykey only exists when the table is keyed
        if 'primarykey' in args and args['primarykey']:
            self._primarykey = args.get('primarykey', None)

        # callback hook lists consumed by insert/update/delete
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        fieldnames,newfields=set(),[]
        if hasattr(self,'_primarykey'):
            if not isinstance(self._primarykey,list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(self._primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==self._primarykey[0]][0]
        elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
            # non-keyed table without an explicit id field: add one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                # virtual/computed fields are attached after the real ones
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another db/table: work on a copy
                    field = copy.copy(field)
                newfields.append(field)
                fieldnames.add(field.name)
                if field.type=='id':
                    self._id = field
            elif isinstance(field, Table):
                # inherit all non-id fields from another table definition
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        field = field.clone(point_self_references_to=t2)
                        newfields.append(field)
                        fieldnames.add(field.name)
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # backends storing uploads in blobs get a companion blob field
            # per upload field
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            field_name = field.name
            if db and db.check_reserved:
                db.check_reserved_keyword(field_name)
            elif field_name in reserved:
                raise SyntaxError("field name %s not allowed" % field_name)

            # field names must be unique case-insensitively
            if field_name.lower() in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                                  % (field_name, tablename))
            else:
                lower_fieldnames.add(field_name.lower())

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table and database
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
            if db and not field.type in ('text', 'blob', 'json') and \
                    db._adapter.maxcharlength < field.length:
                # clamp to what the backend can store
                field.length = db._adapter.maxcharlength
        self.ALL = SQLALL(self)

        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
8153 8154 @property
8155 - def fields(self):
8156 return self._fields
8157
8158 - def update(self,*args,**kwargs):
8159 raise RuntimeError("Syntax Not Supported")
8160
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  current_record = 'current_record',
                                  is_active = 'is_active'):
        """
        Turn on record versioning for this table.

        Defines an archive table (in ``archive_db``, defaulting to this
        table's db), copies each record there before updates, and — when
        an ``is_active`` field exists — turns deletes into soft-deletes
        filtered out by the common filter.
        """
        archive_db = archive_db or self._db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        # cross-database archives can only store the id as a plain bigint
        field_type = self if archive_db is self._db else 'bigint'
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type),
            *[field.clone(unique=False) for field in self])
        # defaults bind the current values of archive_db/names to the lambda
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            newquery = lambda query, t=self: t.is_active == True
            query = self._common_filter
            # NOTE(review): this combines a Query with a callable via '&',
            # which looks inconsistent with _common_filter being used as a
            # callable elsewhere — confirm against the Set/Query machinery
            if query:
                newquery = query & newquery
            self._common_filter = newquery
8187
8188 - def _validate(self,**vars):
8189 errors = Row() 8190 for key,value in vars.iteritems(): 8191 value,error = self[key].validate(value) 8192 if error: 8193 errors[key] = error 8194 return errors
8195
    def _create_references(self):
        """
        Resolve this table's 'reference <table>[.<field>]' fields.

        Fields referencing a table not yet defined are parked in
        db._pending_references and hooked up when that table appears;
        each referenced table gets this field appended to its
        _referenced_by list.
        """
        db = self._db
        pr = db._pending_references
        self._referenced_by = []
        for field in self:
            fieldname = field.name
            field_type = field.type
            if isinstance(field_type,str) and field_type[:10] == 'reference ':
                ref = field_type[10:].strip()
                if not ref.split():
                    raise SyntaxError('Table: reference to nothing: %s' %ref)
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in db:
                    # referenced table not defined yet: defer the hookup
                    pr[rtablename] = pr.get(rtablename,[]) + [field]
                    continue
                rtable = db[rtablename]
                if len(refs)==2:
                    # explicit field reference: only allowed between keyed tables
                    rfieldname = refs[1]
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError(
                            'keyed tables can only reference other keyed tables (for now)')
                    if rfieldname not in rtable.fields:
                        raise SyntaxError(
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename))
                rtable._referenced_by.append(field)
        # adopt references to this table that were parked before it existed
        for referee in pr.get(self._tablename,[]):
            self._referenced_by.append(referee)
8225
8226 - def _filter_fields(self, record, id=False):
8227 return dict([(k, v) for (k, v) in record.iteritems() if k 8228 in self.fields and (self[k].type!='id' or id)])
8229
8230 - def _build_query(self,key):
8231 """ for keyed table only """ 8232 query = None 8233 for k,v in key.iteritems(): 8234 if k in self._primarykey: 8235 if query: 8236 query = query & (self[k] == v) 8237 else: 8238 query = (self[k] == v) 8239 else: 8240 raise SyntaxError( 8241 'Field %s is not part of the primary key of %s' % \ 8242 (k,self._tablename)) 8243 return query
8244
8245 - def __getitem__(self, key):
8246 if not key: 8247 return None 8248 elif isinstance(key, dict): 8249 """ for keyed table """ 8250 query = self._build_query(key) 8251 rows = self._db(query).select() 8252 if rows: 8253 return rows[0] 8254 return None 8255 elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): 8256 return self._db(self._id == key).select(limitby=(0,1)).first() 8257 elif key: 8258 return ogetattr(self, str(key))
8259
    def __call__(self, key=DEFAULT, **kwargs):
        """
        Fetch a single record: table(id), table(query) or
        table(**{field: value, ...}); returns None when nothing matches
        or when a fetched record fails the extra field==value checks.
        """
        # _for_update/_orderby are select options, not field filters
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            elif not str(key).isdigit():
                # non-numeric key cannot be an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            if record:
                # remaining kwargs act as extra equality constraints
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
                return record
        elif kwargs:
            # no key: AND all field==value pairs into one query
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby).first()
        else:
            return None
8285
    def __setitem__(self, key, value):
        """table[key] = value: upsert for keyed tables (dict key), update
        by id for numeric keys (0 means plain insert), otherwise plain
        attribute assignment."""
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; on failure treat as update (upsert)
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            # NOTE(review): a string key like '0' passes isdigit() but
            # fails `key == 0`, so it is treated as an update -- confirm
            # callers always pass ints here.
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    # attribute access shares the record/field lookup logic above
    __getattr__ = __getitem__
8315 - def __setattr__(self, key, value):
8316 if key[:1]!='_' and key in self: 8317 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8318 osetattr(self,key,value)
8319
8320 - def __delitem__(self, key):
8321 if isinstance(key, dict): 8322 query = self._build_query(key) 8323 if not self._db(query).delete(): 8324 raise SyntaxError('No such record: %s' % key) 8325 elif not str(key).isdigit() or \ 8326 not self._db(self._id == key).delete(): 8327 raise SyntaxError('No such record: %s' % key)
8328
    def __contains__(self,key):
        # membership delegates to attribute lookup, so both field names
        # and other attributes test True
        return hasattr(self,key)

    # legacy dict-style alias
    has_key = __contains__
8334 - def items(self):
8335 return self.__dict__.items()
8336
8337 - def __iter__(self):
8338 for fieldname in self.fields: 8339 yield self[fieldname]
8340
    def iteritems(self):
        # iterate (attribute, value) pairs of the instance dict lazily
        return self.__dict__.iteritems()
8343 8344
8345 - def __repr__(self):
8346 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8347
    def __str__(self):
        # When aliased (_ot holds the original table name) render
        # "<original> AS <alias>"; Oracle's dialect does not accept the
        # AS keyword for table aliases, hence the patched branch.
        if hasattr(self,'_ot') and self._ot is not None:
            if 'Oracle' in str(type(self._db._adapter)): # <<< patch
                return '%s %s' % (self._ot, self._tablename) # <<< patch
            return '%s AS %s' % (self._ot, self._tablename)
        return self._tablename
8354
8355 - def _drop(self, mode = ''):
8356 return self._db._adapter._drop(self, mode)
8357
8358 - def drop(self, mode = ''):
8359 return self._db._adapter.drop(self,mode)
8360
8361 - def _listify(self,fields,update=False):
8362 new_fields = {} # format: new_fields[name] = (field,value) 8363 8364 # store all fields passed as input in new_fields 8365 for name in fields: 8366 if not name in self.fields: 8367 if name != 'id': 8368 raise SyntaxError( 8369 'Field %s does not belong to the table' % name) 8370 else: 8371 field = self[name] 8372 value = fields[name] 8373 if field.filter_in: 8374 value = field.filter_in(value) 8375 new_fields[name] = (field,value) 8376 8377 # check all fields that should be in the table but are not passed 8378 to_compute = [] 8379 for ofield in self: 8380 name = ofield.name 8381 if not name in new_fields: 8382 # if field is supposed to be computed, compute it! 8383 if ofield.compute: # save those to compute for later 8384 to_compute.append((name,ofield)) 8385 # if field is required, check its default value 8386 elif not update and not ofield.default is None: 8387 value = ofield.default 8388 fields[name] = value 8389 new_fields[name] = (ofield,value) 8390 # if this is an update, user the update field instead 8391 elif update and not ofield.update is None: 8392 value = ofield.update 8393 fields[name] = value 8394 new_fields[name] = (ofield,value) 8395 # if the field is still not there but it should, error 8396 elif not update and ofield.required: 8397 raise RuntimeError( 8398 'Table: missing required field: %s' % name) 8399 # now deal with fields that are supposed to be computed 8400 if to_compute: 8401 row = Row(fields) 8402 for name,ofield in to_compute: 8403 # try compute it 8404 try: 8405 new_fields[name] = (ofield,ofield.compute(row)) 8406 except (KeyError, AttributeError): 8407 # error sinlently unless field is required! 8408 if ofield.required: 8409 raise SyntaxError('unable to comput field: %s' % name) 8410 return new_fields.values()
8411
8412 - def _attempt_upload(self, fields):
8413 for field in self: 8414 if field.type=='upload' and field.name in fields: 8415 value = fields[field.name] 8416 if value and not isinstance(value,str): 8417 if hasattr(value,'file') and hasattr(value,'filename'): 8418 new_name = field.store(value.file,filename=value.filename) 8419 elif hasattr(value,'read') and hasattr(value,'name'): 8420 new_name = field.store(value,filename=value.name) 8421 else: 8422 raise RuntimeError("Unable to handle upload") 8423 fields[field.name] = new_name
8424
8425 - def _defaults(self, fields):
8426 "If there are no fields/values specified, return table defaults" 8427 if not fields: 8428 fields = {} 8429 for field in self: 8430 if field.type != "id": 8431 fields[field.name] = field.default 8432 return fields
8433
8434 - def _insert(self, **fields):
8435 fields = self._defaults(fields) 8436 return self._db._adapter._insert(self, self._listify(fields))
8437
    def insert(self, **fields):
        """Insert a record; returns the new id (backend-dependent type) or
        0 when a _before_insert callback vetoes the insert."""
        fields = self._defaults(fields)
        # store any incoming file objects before hitting the adapter
        self._attempt_upload(fields)
        # a truthy return from any _before_insert callback aborts
        if any(f(fields) for f in self._before_insert): return 0
        ret = self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            # list comprehension used only for its side effects
            [f(fields,ret) for f in self._after_insert]
        return ret
8447
    def validate_and_insert(self,**fields):
        """Run each field's validators; insert only if all pass.
        Returns a Row with .id (new id or None) and .errors (Row mapping
        field name -> error message)."""
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(fields)
        for key,value in fields.iteritems():
            # validators may transform the value as well as reject it
            value,error = self[key].validate(value)
            if error:
                response.errors[key] = "%s" % error
            else:
                new_fields[key] = value
        if not response.errors:
            response.id = self.insert(**new_fields)
        else:
            response.id = None
        return response
8463
8464 - def update_or_insert(self, _key=DEFAULT, **values):
8465 if _key is DEFAULT: 8466 record = self(**values) 8467 elif isinstance(_key,dict): 8468 record = self(**_key) 8469 else: 8470 record = self(_key) 8471 if record: 8472 record.update_record(**values) 8473 newid = None 8474 else: 8475 newid = self.insert(**values) 8476 return newid
8477
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        # normalize each input dict into [(Field, value), ...] pairs
        items = [self._listify(item) for item in items]
        # a truthy return from any _before_insert callback aborts everything
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        # fire _after_insert callbacks with (item, new_id); the nested
        # comprehension is used only for its side effects
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
8487
8488 - def _truncate(self, mode = None):
8489 return self._db._adapter._truncate(self, mode)
8490
8491 - def truncate(self, mode = None):
8492 return self._db._adapter.truncate(self, mode)
8493
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.

        Extra kwargs: delimiter, quotechar, quoting are passed through
        to csv.reader.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one CSV cell to the Python value for `field`,
            # remapping reference ids through id_map/id_offset
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = int(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    value = [id_map[ref_table][int(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][int(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+int(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True when `colname` names this table's id-type field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for line in reader:
            if not line:
                break
            if not colnames:
                # first non-empty row is the header: strip any
                # 'table.' prefix and locate the id / unique columns
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    else:
                        cols.append(i)
                    if colname == unique:
                        unique_idx = i
            else:
                items = [fix(self[colnames[i]], line[i], id_map, id_offset) \
                         for i in cols if colnames[i] in self.fields]

                if not id_map and cid is not None and id_offset is not None and not unique_idx:
                    # no id map: preserve csv ids by tracking an offset
                    # between csv ids and the ids this table generates
                    csv_id = int(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        if curr_id>csv_id:
                            id_offset[self._tablename] = curr_id-csv_id
                        else:
                            id_offset[self._tablename] = 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[int(line[cid])] = new_id
8630 - def as_dict(self, flat=False, sanitize=True, field_options=True):
8631 tablename = str(self) 8632 table_as_dict = dict(name=tablename, items={}, fields=[], 8633 sequence_name=self._sequence_name, 8634 trigger_name=self._trigger_name, 8635 common_filter=self._common_filter, format=self._format, 8636 singular=self._singular, plural=self._plural) 8637 8638 for field in self: 8639 if (field.readable or field.writable) or (not sanitize): 8640 table_as_dict["fields"].append(field.name) 8641 table_as_dict["items"][field.name] = \ 8642 field.as_dict(flat=flat, sanitize=sanitize, 8643 options=field_options) 8644 return table_as_dict
8645
8646 - def as_xml(self, sanitize=True, field_options=True):
8647 if not have_serializers: 8648 raise ImportError("No xml serializers available") 8649 d = self.as_dict(flat=True, sanitize=sanitize, 8650 field_options=field_options) 8651 return serializers.xml(d)
8652
8653 - def as_json(self, sanitize=True, field_options=True):
8654 if not have_serializers: 8655 raise ImportError("No json serializers available") 8656 d = self.as_dict(flat=True, sanitize=sanitize, 8657 field_options=field_options) 8658 return serializers.json(d)
8659
8660 - def as_yaml(self, sanitize=True, field_options=True):
8661 if not have_serializers: 8662 raise ImportError("No YAML serializers available") 8663 d = self.as_dict(flat=True, sanitize=sanitize, 8664 field_options=field_options) 8665 return serializers.yaml(d)
8666
8667 - def with_alias(self, alias):
8668 return self._db._adapter.alias(self,alias)
8669
8670 - def on(self, query):
8671 return Expression(self._db,self._db._adapter.ON,self,query)
8672
def archive_record(qset, fs, archive_table, current_record):
    """Copy every row selected by `qset` into `archive_table`, recording
    the original row id in the `current_record` column. Returns False so
    it can be used as a non-vetoing _before_update callback."""
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames) != 1:
        raise RuntimeError("cannot update join")
    table = qset.db[tablenames[0]]
    for row in qset.select():
        archived = archive_table._filter_fields(row)
        archived[current_record] = row.id
        archive_table.insert(**archived)
    return False
8682
class Expression(object):
    """
    Node of a DAL expression tree. An Expression pairs an adapter
    operation (`op`) with up to two operands (`first`, `second`) and a
    DAL type string; adapters expand the tree into backend SQL.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):
        # `op` is an adapter method/constant that knows how to render
        # this node; operands may themselves be Expressions or Fields
        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # inherit the type from the first operand unless given explicitly
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregates -----------------------------------------------------

    def sum(self):
        """SQL SUM() over this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        """SQL MAX() over this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        """SQL MIN() over this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        """SQL LENGTH() of this expression; result type is integer."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'LENGTH', 'integer')

    def avg(self):
        """SQL AVG() over this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        """SQL ABS() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    # --- string functions -----------------------------------------------

    def lower(self):
        """SQL LOWER() of this expression."""
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        """SQL UPPER() of this expression."""
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    # --- date/time extraction -------------------------------------------

    def year(self):
        """EXTRACT(year) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        """EXTRACT(month) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        """EXTRACT(day) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        """EXTRACT(hour) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        """EXTRACT(minute) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        """SQL COALESCE(self, *others)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        """SQL COALESCE(self, 0)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        """EXTRACT(second) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        """Seconds since the epoch for a datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        """Python 2 slicing -> SQL SUBSTRING. Negative indices count from
        the end; SQL positions are 1-based, hence the +1 adjustments."""
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            # open-ended slice: take everything from pos0
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # single-character access delegates to the slice machinery
        return self[i:i + 1]

    def __str__(self):
        # render this expression as backend SQL text
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        """Comma-join two expressions (orderby lists)."""
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        """Inversion (e.g. DESC ordering) of this expression."""
        db = self.db
        # NOTE(review): __init__ sets `self.op`, never `self._op`, so this
        # hasattr check looks always-False and the double-invert shortcut
        # appears to be dead code -- confirm before changing.
        if hasattr(self,'_op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    # --- arithmetic -----------------------------------------------------

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        # subtraction of dates/times yields a double; integers stay integer
        db = self.db
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparisons: these build Query objects, not Expressions --------

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        """SQL LIKE (or ILIKE when case_sensitive=False)."""
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        """Backend regular-expression match."""
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
            if isinstance(value,Query):
                # nested select: belongs(query) -> IN (SELECT id ...)
                value = db(value)._select(value.first._table._id)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        """SQL prefix match; only valid on string/text/json fields."""
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        """SQL suffix match; only valid on string/text/json fields."""
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameters is only useful for PostgreSQL
        For other RDMBs it is ignored and contains is always case in-sensitive
        For MongoDB and GAE contains is always case sensitive
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            # combine per-item subqueries with AND (all=True) or OR
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        """SQL "AS alias" for this expression in a select list."""
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        """Render a geometry expression as GeoJSON."""
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        """Render a geometry expression as WKT text."""
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        """X coordinate of a point geometry."""
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        """Y coordinate of a point geometry."""
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        """Distance between two geometries."""
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        """Simplified geometry with the given tolerance."""
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
8971
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        self.type = type
        self.native = native
        # encoder/decoder default to identity so they are always callable
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """Duck-type str.startswith on the underlying web2py type string.

        BUG FIX: the original called self.type.startswith(self, text),
        passing the instance itself as the prefix argument, which always
        raised TypeError and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except (TypeError, AttributeError):
            # non-string prefix or non-string self.type: no match
            return False

    def __getslice__(self, a=0, b=100):
        # custom types have no meaningful slice semantics
        return None

    def __getitem__(self, i):
        # custom types have no meaningful item semantics
        return None

    def __str__(self):
        # render as the declared class/type name (used when building DDL)
        return self._class
9032
class FieldVirtual(object):
    """A virtual (computed, not stored) field: `f` is a callable applied
    to each row; the remaining attributes mimic a regular Field so forms
    and grids can treat both uniformly."""
    def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
        # for backward compatibility: a single callable argument means the
        # caller passed only the function (the 'unkown' placeholder
        # spelling is historical and relied upon -- do not "fix" it)
        (self.name, self.f) = (name, f) if f else ('unkown', name)
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_',' ')
        self.represent = IDENTITY
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False   # virtual fields are read-only
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None
9048
class FieldMethod(object):
    """A method attached to rows: `f` is the callable, `handler` an
    optional wrapper that controls how/when it is invoked."""
    def __init__(self, name, f=None, handler=None):
        # Backward compatibility: when only a callable is given, it
        # arrives in `name`; keep the legacy 'unkown' placeholder name
        # (historical spelling, preserved deliberately).
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unkown', name
        self.handler = handler
9054
def list_represent(x, r=None):
    """Render a list value as a comma-separated string; `r` (the row) is
    accepted for represent-signature compatibility and ignored."""
    return ', '.join(map(str, x or []))
9057
9058 -class Field(Expression):
9059 9060 Virtual = FieldVirtual 9061 Method = FieldMethod 9062 Lazy = FieldMethod # for backward compatibility 9063 9064 """ 9065 an instance of this class represents a database field 9066 9067 example:: 9068 9069 a = Field(name, 'string', length=32, default=None, required=False, 9070 requires=IS_NOT_EMPTY(), ondelete='CASCADE', 9071 notnull=False, unique=False, 9072 uploadfield=True, widget=None, label=None, comment=None, 9073 uploadfield=True, # True means store on disk, 9074 # 'a_field_name' means store in this field in db 9075 # False means file content will be discarded. 9076 writable=True, readable=True, update=None, authorize=None, 9077 autodelete=False, represent=None, uploadfolder=None, 9078 uploadseparate=False # upload to separate directories by uuid_keys 9079 # first 2 character and tablename.fieldname 9080 # False - old behavior 9081 # True - put uploaded file in 9082 # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] 9083 # directory) 9084 uploadfs=None # a pyfilesystem where to store upload 9085 9086 to be used as argument of DAL.define_table 9087 9088 allowed field types: 9089 string, boolean, integer, double, text, blob, 9090 date, time, datetime, upload, password 9091 9092 strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql) 9093 fields should have a default or they will be required in SQLFORMs 9094 the requires argument is used to validate the field input in SQLFORMs 9095 9096 """ 9097
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        ):
        """Construct a Field definition; see the class docstring for the
        meaning of each attribute. Raises SyntaxError for names that
        would clash with Table attributes or Python keywords."""
        self._db = self.db = None # both for backward compatibility
        # Expression-protocol attributes (Field subclasses Expression)
        self.op = None
        self.first = None
        self.second = None
        self.name = fieldname = cleanup(fieldname)
        if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # passing a Table/Field as `type` means "reference <tablename>"
        self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
        # with no explicit default, fall back to the update value (or None)
        self.default = default if default!=DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list types get a comma-joining representer by default
        self.represent = list_represent if \
            represent==None and type in ('list:integer','list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        # default label: 'my_field' -> 'My Field'
        self.label = label if label!=None else fieldname.replace('_',' ').title()
        self.requires = requires if requires!=None else []
        self.map_none = map_none
9172
9173 - def set_attributes(self,*args,**attributes):
9174 self.__dict__.update(*args,**attributes)
9175
9176 - def clone(self,point_self_references_to=False,**args):
9177 field = copy.copy(self) 9178 if point_self_references_to and \ 9179 field.type == 'reference %s'+field._tablename: 9180 field.type = 'reference %s' % point_self_references_to 9181 field.__dict__.update(args) 9182 return field
9183
    def store(self, file, filename=None, path=None):
        """Persist an uploaded file and return the generated safe
        filename of the form <table>.<field>.<uuid>.<b16-encoded name>.<ext>.
        Depending on configuration the content goes to a blob Field, a
        pyfilesystem (uploadfs) or the uploads folder on disk."""
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # normalize separators and strip any directory components
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # original name is b16-encoded into the stored filename so it can
        # be recovered later by retrieve_file_properties
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # truncate to field length while keeping the extension
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield,Field):
            # content stored in another table's blob field
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys={self_uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            # content stored on the filesystem (or uploadfs)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                # shard into subdirectories by table.field/uuid prefix
                path = pjoin(path,"%s.%s" %(self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename
9236
    def retrieve(self, name, path=None):
        """Resolve a stored upload name back to (original_filename,
        stream-or-path). Raises http.HTTP(404/403) when the row is
        missing or authorization fails, TypeError for malformed names."""
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self_uploadfield, str):
            # need the row either for the blob content or the auth check
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
        if self.authorize and not self.authorize(row):
            raise http.HTTP(403)
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s' % name)
        file_properties = self.retrieve_file_properties(name,path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str): # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield,Field):
            # content lives in another table's blob field
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # (note: returns the path string, not an open stream)
            stream = pjoin(file_properties['path'], name)
        return (filename, stream)
9267
    def retrieve_file_properties(self, name, path=None):
        """
        decode a stored upload value `name` back into a dict with the
        original `filename` and the storage `path` (path is None when
        the content is stored in the database).
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        try:
            m = REGEX_UPLOAD_PATTERN.match(name)
            if not m or not self.isattachment:
                raise TypeError('Can\'t retrieve %s file properties' % name)
            # the original filename is base16-encoded inside the stored name
            filename = base64.b16decode(m.group('name'), True)
            filename = REGEX_CLEANUP_FN.sub('_', filename)
        except (TypeError, AttributeError):
            # not a regular generated upload name: fall back to the raw value
            filename = name
        if isinstance(self_uploadfield, str): # ## if file is in DB
            return dict(path=None,filename=filename)
        elif isinstance(self_uploadfield,Field):
            # stored in a blob field of another table: no filesystem path
            return dict(path=None,filename=filename)
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                # files are sharded into <table>.<field>/<uuid[:2]> subfolders
                # NOTE(review): relies on `m` from the try block above; the
                # fallback path with m None would fail here - confirm inputs
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = pjoin(path,"%s.%s" % (t,f),u[:2])
            return dict(path=path,filename=filename)
9298 9299
9300 - def formatter(self, value):
9301 requires = self.requires 9302 if value is None or not requires: 9303 return value or self.map_none 9304 if not isinstance(requires, (list, tuple)): 9305 requires = [requires] 9306 elif isinstance(requires, tuple): 9307 requires = list(requires) 9308 else: 9309 requires = copy.copy(requires) 9310 requires.reverse() 9311 for item in requires: 9312 if hasattr(item, 'formatter'): 9313 value = item.formatter(value) 9314 return value
9315
9316 - def validate(self, value):
9317 if not self.requires or self.requires == DEFAULT: 9318 return ((value if value!=self.map_none else None), None) 9319 requires = self.requires 9320 if not isinstance(requires, (list, tuple)): 9321 requires = [requires] 9322 for validator in requires: 9323 (value, error) = validator(value) 9324 if error: 9325 return (value, error) 9326 return ((value if value!=self.map_none else None), None)
9327
9328 - def count(self, distinct=None):
9329 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9330
    def as_dict(self, flat=False, sanitize=True, options=True):
        """
        return a dict describing this field: its attributes plus a
        representation of its validators under 'requires'.

        :param flat: when True, values that are not plainly serializable
            are flattened to strings (json/xml friendly)
        :param sanitize: when True, validators whose type name contains
            CRYPT or IS_STRONG are replaced by None (hides secrets)
        :param options: when False, blanks the 'labels'/'theset' entries
            of validator dicts to keep the output small
        """

        # Field attributes copied into the result, one key per name
        attrs = ('type', 'length', 'default', 'required',
                 'ondelete', 'notnull', 'unique', 'uploadfield',
                 'widget', 'label', 'comment', 'writable', 'readable',
                 'update', 'authorize', 'autodelete', 'represent',
                 'uploadfolder', 'uploadseparate', 'uploadfs',
                 'compute', 'custom_store', 'custom_retrieve',
                 'custom_retrieve_file_properties', 'custom_delete',
                 'filter_in', 'filter_out', 'custom_qualifier',
                 'map_none', 'name')

        SERIALIZABLE_TYPES = (int, long, basestring, dict, list,
                              float, tuple, bool, type(None))

        def flatten(obj):
            # recursively convert obj to serializable values when flat=True;
            # otherwise only shallow-copy dicts/sets to protect the originals
            if flat:
                if isinstance(obj, flatten.__class__):
                    # obj is a function: keep only its type string
                    return str(type(obj))
                elif isinstance(obj, type):
                    # class object: extract the dotted name from repr
                    try:
                        return str(obj).split("'")[1]
                    except IndexError:
                        return str(obj)
                elif not isinstance(obj, SERIALIZABLE_TYPES):
                    return str(obj)
                elif isinstance(obj, dict):
                    newobj = dict()
                    for k, v in obj.items():
                        newobj[k] = flatten(v)
                    return newobj
                elif isinstance(obj, (list, tuple, set)):
                    return [flatten(v) for v in obj]
                else:
                    return obj
            elif isinstance(obj, (dict, set)):
                return obj.copy()
            else: return obj

        def filter_requires(t, r, options=True):
            # turn validator r (of type t) into a serializable dict,
            # censoring sensitive validators when sanitize is on
            if sanitize and any([keyword in str(t).upper() for
                                 keyword in ("CRYPT", "IS_STRONG")]):
                return None

            if not isinstance(r, dict):
                if options and hasattr(r, "options"):
                    if callable(r.options):
                        # force lazy option lists to materialize
                        r.options()
                newr = r.__dict__.copy()
            else:
                newr = r.copy()

            # remove options if not required
            if not options and newr.has_key("labels"):
                [newr.update({key:None}) for key in
                 ("labels", "theset") if (key in newr)]

            for k, v in newr.items():
                if k == "other":
                    # nested validator (e.g. IS_EMPTY_OR): recurse
                    if isinstance(v, dict):
                        otype, other = v.popitem()
                    else:
                        otype = flatten(type(v))
                        other = v
                    newr[k] = {otype: filter_requires(otype, other,
                                                      options=options)}
                else:
                    newr[k] = flatten(v)
            return newr

        # map each validator's type name to its serialized form
        if isinstance(self.requires, (tuple, list, set)):
            requires = dict([(flatten(type(r)),
                            filter_requires(type(r), r,
                                            options=options)) for
                            r in self.requires])
        else:
            requires = {flatten(type(self.requires)):
                        filter_requires(type(self.requires),
                                        self.requires, options=options)}

        d = dict(colname="%s.%s" % (self.tablename, self.name),
                 requires=requires)
        d.update([(attr, flatten(getattr(self, attr))) for attr in attrs])
        return d
9416 - def as_xml(self, sanitize=True, options=True):
9417 if have_serializers: 9418 xml = serializers.xml 9419 else: 9420 raise ImportError("No xml serializers available") 9421 d = self.as_dict(flat=True, sanitize=sanitize, 9422 options=options) 9423 return xml(d)
9424
9425 - def as_json(self, sanitize=True, options=True):
9426 if have_serializers: 9427 json = serializers.json 9428 else: 9429 raise ImportError("No json serializers available") 9430 d = self.as_dict(flat=True, sanitize=sanitize, 9431 options=options) 9432 return json(d)
9433
9434 - def as_yaml(self, sanitize=True, options=True):
9435 if have_serializers: 9436 d = self.as_dict(flat=True, sanitize=sanitize, 9437 options=options) 9438 return serializers.yaml(d) 9439 else: 9440 raise ImportError("No YAML serializers available")
9441
9442 - def __nonzero__(self):
9443 return True
9444
9445 - def __str__(self):
9446 try: 9447 return '%s.%s' % (self.tablename, self.name) 9448 except: 9449 return '<no table>.%s' % self.name
9450
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # op is an adapter operator method (e.g. db._adapter.EQ);
        # first/second are the operands (Field/Expression/Query/constant)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # expand the query to its SQL-ish text for debugging
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # query1 & query2 -> logical AND
        return Query(self.db,self.db._adapter.AND,self,other)

    def __or__(self, other):
        # query1 | query2 -> logical OR
        return Query(self.db,self.db._adapter.OR,self,other)

    def __invert__(self):
        # double negation collapses back to the original query
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # queries compare equal when they expand to the same text
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # SQL CASE WHEN <self> THEN t ELSE f END expression
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
         "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively convert a Query/Expression __dict__ into
            # plain serializable values; unknown values are dropped
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                elif k == "op":
                    if callable(v):
                        # adapter method: store its name (e.g. "EQ")
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize the flat dict representation as XML
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize the flat dict representation as JSON
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9570
def xorify(orderby):
    """
    Fold a sequence of orderby expressions into a single expression by
    chaining them with `|`; return None for an empty/None sequence.
    """
    if not orderby:
        return None
    items = iter(orderby)
    combined = next(items)
    for expression in items:
        combined = combined | expression
    return combined
9578
def use_common_filters(query):
    """
    True when `query` exists, carries an ignore_common_filters flag,
    and that flag is off; falsy otherwise.
    """
    if not query:
        return query
    return (hasattr(query, 'ignore_common_filters') and
            not query.ignore_common_filters)
9582
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
        set = db(db.users.name=='Max')
    you can:
        set.update(db.users.name='Massimo')
        set.delete() # all elements in the set
        set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
       subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # copy the query before flipping its flag so the caller's
        # Query object is left untouched
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        """refine this Set: returns a new Set AND-ing in `query`"""
        # normalize the argument into a query-like object first
        if isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL text of the COUNT, without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL text of the SELECT, without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL text of the DELETE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL text of the UPDATE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        """plain dict representation of the Set (query + db info)"""
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                # NOTE(review): uri gets db._dbname and name gets str(db);
                # looks swapped but is the historical behavior - confirm
                # before changing
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                # operands may themselves be nested query dicts or
                # {"tablename":..., "fieldname":...} field references
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            # resolve the adapter method for non-comparison operators
            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # fetch at most one record: cheap emptiness test
        return not self.select(limitby=(0,1))

    def count(self,distinct=None, cache=None):
        """count records; cache is an optional (cache_model, time_expire)"""
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            # keep cache keys to a bounded length
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                  db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """execute the SELECT and return a Rows object"""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # wrap the SELECT text so it can be embedded in another query
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        """delete the records in the set, honoring _before/_after_delete"""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        # any _before_delete callback returning truthy aborts the delete
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """update the records in the set, honoring _before/_after_update"""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        # any _before_update callback returning truthy aborts the update
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update(tablename,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        """validate each field, then update; returns a Row with
        .errors (per-field) and .updated (count or None on errors)"""
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        """best-effort removal of files referenced by upload fields of
        the records in the set; always returns False"""
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        # only autodelete upload fields stored on the filesystem qualify
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                # skip files that are being kept (same new value)
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # sharded layout: <table>.<field>/<uuid[:2]>
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
9870
class RecordUpdater(object):
    """
    Callable bound to a single record: calling it updates that record
    (bypassing common filters) and refreshes the cached column set.
    """

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        table = self.db[self.tablename]
        newfields = fields or dict(self.colset)
        # drop unknown columns and the primary key before updating
        for fieldname in list(newfields):
            if fieldname not in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        table._db(table._id == self.id,
                  ignore_common_filters=True).update(**newfields)
        self.colset.update(newfields)
        return self.colset
9886
class RecordDeleter(object):
    """Callable bound to a single record: calling it deletes that record."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        table = self.db[self.tablename]
        return self.db(table._id == self.id).delete()
9892
class LazySet(object):
    """
    Lazy proxy for the Set of records whose <tablename>.<fieldname>
    references a given id (used for reference-field backreferences).
    The concrete Set is rebuilt on demand by _getset() and every public
    method simply delegates to it.
    """
    def __init__(self, field, id):
        self.db, self.tablename, self.fieldname, self.id = \
            field.db, field._tablename, field.name, id
    def _getset(self):
        # build the concrete Set: <table>.<field> == id
        query = self.db[self.tablename][self.fieldname]==self.id
        return Set(self.db,query)
    def __repr__(self):
        return repr(self._getset())
    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)
    def _count(self,distinct=None):
        return self._getset()._count(distinct)
    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields,**attributes)
    def _delete(self):
        return self._getset()._delete()
    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)
    def isempty(self):
        return self._getset().isempty()
    def count(self,distinct=None, cache=None):
        return self._getset().count(distinct,cache)
    def select(self, *fields, **attributes):
        return self._getset().select(*fields,**attributes)
    def nested_select(self,*fields,**attributes):
        return self._getset().nested_select(*fields,**attributes)
    def delete(self):
        return self._getset().delete()
    def update(self, **update_fields):
        return self._getset().update(**update_fields)
    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)
    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)
    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
9930
class VirtualCommand(object):
    """
    Bind a lazy virtual-field method to a row; calling the instance
    invokes the method with the row as first argument.
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        target = self.method
        return target(self.row, *args, **kwargs)
9937
def lazy_virtualfield(f):
    """
    Decorator: mark `f` as a lazy virtual field so Rows.setvirtualfields
    wraps it in a VirtualCommand instead of evaluating it eagerly.
    """
    setattr(f, '__lazy__', True)
    return f
9941
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=[],
        colnames=[],
        compact=True,
        rawrows=None
        ):
        self.db = db
        self.records = records
        self.colnames = colnames
        # compact=True unboxes single-table rows in __getitem__
        self.compact = compact
        self.response = rawrows

    def __repr__(self):
        return '<Rows (%s)>' % len(self.records)

    def setvirtualfields(self,**keyed_virtualfields):
        """
        attaches virtual fields to every row, keyed by table name:

        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @lazy_virtualfield)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy virtuals: bound now, evaluated on demand
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # legacy virtuals: evaluate eagerly, once per row
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self

    def __and__(self,other):
        """concatenation of two Rows with identical columns"""
        if self.colnames!=other.colnames:
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records+other.records
        return Rows(self.db,records,self.colnames)

    def __or__(self,other):
        """union of two Rows with identical columns (no duplicates)"""
        if self.colnames!=other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        # build a new list: the previous in-place += mutated self.records
        # as a side effect of the union
        records = self.records + [record for record in other.records
                                  if not record in self.records]
        return Rows(self.db,records,self.colnames)

    def __nonzero__(self):
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        return Rows(self.db,self.records[a:b],self.colnames)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            # single-table select: transparently unbox row.<tablename>
            return row[keys[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """

        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        """first row, or None when the set is empty"""
        if not self.records:
            return None
        return self[0]

    def last(self):
        """last row, or None when the set is empty"""
        if not self.records:
            return None
        return self[-1]

    def find(self,f,limitby=None):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self:
            return Rows(self.db, [], self.colnames)
        records = []
        if limitby:
            a,b = limitby
        else:
            a,b = 0,len(self)
        k = 0
        # limitby is applied over *matching* rows, not raw positions
        for row in self:
            if f(row):
                if a<=k: records.append(row)
                k += 1
                if k==b: break
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i=0
        while i<len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        rows = Rows(self.db,[],self.colnames,compact=False)
        rows.records = sorted(self,key=f,reverse=reverse)
        return rows

    def group_by_value(self, field):
        """
        regroups the rows, by one of the fields
        """
        if not self.records:
            return {}
        key = str(field)
        grouped_row_group = dict()

        for row in self:
            value = row[key]
            if not value in grouped_row_group:
                grouped_row_group[value] = [row]
            else:
                grouped_row_group[value].append(row)
        return grouped_row_group

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True,
                custom_types=None):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        # restore the caller's compact flag; the previous code re-assigned
        # the argument here, permanently changing self.compact
        self.compact = oc
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f:
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key,str) and key.count('.')==1:
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            return dict([(r[key],r) for r in rows])
        else:
            return dict([(key(r),r) for r in rows])

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
            This will only work when exporting rows objects!!!!
            DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                return int(value)
            elif hasattr(value, 'isoformat'):
                # date/time-like: 'YYYY-MM-DD HH:MM:SS'
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    # computed/extra column, stored under record._extra
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self,strict=False,row_name='row',rows_name='rows'):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """

        if strict:
            return '<%s>\n%s\n</%s>' % (rows_name,
                '\n'.join(row.as_xml(row_name=row_name,
                                     colnames=self.colnames) for
                          row in self), rows_name)

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def as_xml(self,row_name='row',rows_name='rows'):
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)

    def as_json(self, mode='object', default=None):
        """
        serializes the table to a JSON list of objects
        """

        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")

    # for consistent naming yet backwards compatible
    as_csv = __str__
    json = as_json
################################################################################
# dummy function used to define some doctests
################################################################################

# NOTE: this function exists only to host the doctests below; it is executed
# by doctest.testmod() in the __main__ block at the end of the module.  The
# doctest text is behavior (it is what gets run), so it is reproduced as-is.
def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       jsonf={"j": True},\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10500 ################################################################################ 10501 # deprecated since the new DAL; here only for backward compatibility 10502 ################################################################################ 10503 10504 SQLField = Field 10505 SQLTable = Table 10506 SQLXorable = Expression 10507 SQLQuery = Query 10508 SQLSet = Set 10509 SQLRows = Rows 10510 SQLStorage = Row 10511 SQLDB = DAL 10512 GQLDB = DAL 10513 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10514 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x,y):
    """
    Return the WKT (Well-Known Text) representation of a 2D point,
    e.g. geoPoint(1, 2) -> 'POINT (1.000000 2.000000)'.
    """
    return "POINT (%(x)f %(y)f)" % {'x': x, 'y': y}
10522
def geoLine(*line):
    """
    Return the WKT LINESTRING for a sequence of (x, y) coordinate tuples,
    e.g. geoLine((0, 0), (1, 1)) ->
    'LINESTRING (0.000000 0.000000,1.000000 1.000000)'.
    """
    coords = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ','.join(coords)
10525
def geoPolygon(*line):
    """
    Return the WKT POLYGON (single outer ring) for a sequence of (x, y)
    coordinate tuples.  The caller is responsible for closing the ring
    (repeating the first vertex last) if the consumer requires it.
    """
    ring = ','.join("%f %f" % vertex for vertex in line)
    return "POLYGON ((%s))" % ring
10528 10529 ################################################################################ 10530 # run tests 10531 ################################################################################ 10532 10533 if __name__ == '__main__': 10534 import doctest 10535 doctest.testmod() 10536